
activation.py

# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""activation"""
import numpy as np
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import _selected_ops
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor
from mindspore._extends import cell_attr_register
from mindspore._checkparam import Validator as validator
from ..cell import Cell

__all__ = ['Softmax',
           'LogSoftmax',
           'ReLU',
           'ReLU6',
           'Tanh',
           'GELU',
           'FastGelu',
           'Sigmoid',
           'PReLU',
           'get_activation',
           'LeakyReLU',
           'HSigmoid',
           'HSwish',
           'ELU',
           'LogSigmoid',
           ]


class Softmax(Cell):
    r"""
    Softmax activation function.

    Applies the Softmax function to an n-dimensional input Tensor. The input is a Tensor of logits
    transformed with the exponential function and then normalized to lie in the range [0, 1] and sum up to 1.

    Softmax is defined as:

    .. math::
        \text{softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_{j=0}^{n-1}\exp(x_j)},

    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

    Args:
        axis (Union[int, tuple[int]]): The axis to apply Softmax operation, -1 means the last dimension. Default: -1.

    Inputs:
        - **x** (Tensor) - The input of Softmax with data type of float16 or float32.

    Outputs:
        Tensor, which has the same type and shape as `x`, with values in the range [0, 1].

    Raises:
        TypeError: If `axis` is neither an int nor a tuple.
        TypeError: If dtype of `x` is neither float16 nor float32.
        ValueError: If `axis` is a tuple whose length is less than 1.
        ValueError: If `axis` is a tuple whose elements are not all in range [-len(x), len(x)).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> softmax = nn.Softmax()
        >>> output = softmax(input_x)
        >>> print(output)
        [0.03168 0.01166 0.0861 0.636 0.2341 ]
    """

    def __init__(self, axis=-1):
        super(Softmax, self).__init__()
        self.softmax = _selected_ops.Softmax(axis)

    def construct(self, x):
        return self.softmax(x)


class LogSoftmax(Cell):
    r"""
    LogSoftmax activation function.

    Applies the LogSoftmax function to an n-dimensional input Tensor. The input is transformed by the Softmax
    function and then by the log function to lie in the range [-inf, 0).

    Logsoftmax is defined as:

    .. math::
        \text{logsoftmax}(x_i) = \log \left(\frac{\exp(x_i)}{\sum_{j=0}^{n-1} \exp(x_j)}\right),

    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

    Args:
        axis (int): The axis to apply LogSoftmax operation, -1 means the last dimension. Default: -1.

    Inputs:
        - **x** (Tensor) - The input of LogSoftmax, with float16 or float32 data type.

    Outputs:
        Tensor, which has the same type and shape as `x`, with values in the range [-inf, 0).

    Raises:
        TypeError: If `axis` is not an int.
        TypeError: If dtype of `x` is neither float16 nor float32.
        ValueError: If `axis` is not in range [-len(x), len(x)).

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> log_softmax = nn.LogSoftmax()
        >>> output = log_softmax(input_x)
        >>> print(output)
        [[-5.00672150e+00 -6.72150636e-03 -1.20067215e+01]
         [-7.00091219e+00 -1.40009127e+01 -9.12250078e-04]]
    """

    def __init__(self, axis=-1):
        super(LogSoftmax, self).__init__()
        self.log_softmax = _selected_ops.LogSoftmax(axis)

    def construct(self, x):
        return self.log_softmax(x)


class ELU(Cell):
    r"""
    Exponential Linear Unit activation function.

    Applies the exponential linear unit function element-wise.
    The activation function is defined as:

    .. math::
        E_{i} =
        \begin{cases}
        x_i, &\text{if } x_i \geq 0; \cr
        \text{alpha} * (\exp(x_i) - 1), &\text{otherwise.}
        \end{cases}

    The picture about ELU looks like this `ELU <https://en.wikipedia.org/wiki/
    Activation_function#/media/File:Activation_elu.svg>`_.

    Args:
        alpha (float): The coefficient of the negative factor, whose type is float. Default: 1.0.

    Inputs:
        - **input_data** (Tensor) - The input of ELU with data type of float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If `alpha` is not a float.
        TypeError: If dtype of `input_data` is neither float16 nor float32.
        ValueError: If `alpha` is not equal to 1.0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float32)
        >>> elu = nn.ELU()
        >>> result = elu(input_x)
        >>> print(result)
        [-0.63212055 -0.86466473 0. 2. 1.]
    """

    def __init__(self, alpha=1.0):
        super(ELU, self).__init__()
        self.elu = P.Elu(alpha)

    def construct(self, x):
        return self.elu(x)


class ReLU(Cell):
    r"""
    Rectified Linear Unit activation function.

    Applies the rectified linear unit function element-wise:

    .. math::
        \text{ReLU}(x) = (x)^+ = \max(0, x).

    It returns element-wise :math:`\max(0, x)`; in particular, negative outputs are suppressed to zero
    while positive outputs are kept unchanged.

    The picture about ReLU looks like this `ReLU <https://en.wikipedia.org/wiki/
    Activation_function#/media/File:Activation_rectified_linear.svg>`_.

    Inputs:
        - **input_data** (Tensor) - The input of ReLU.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If dtype of `input_data` is not a number.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.array([-1, 2, -3, 2, -1]), mindspore.float16)
        >>> relu = nn.ReLU()
        >>> output = relu(input_x)
        >>> print(output)
        [0. 2. 0. 2. 0.]
    """

    def __init__(self):
        super(ReLU, self).__init__()
        self.relu = P.ReLU()

    def construct(self, x):
        return self.relu(x)


class ReLU6(Cell):
    r"""
    Compute ReLU6 activation function.

    ReLU6 is similar to ReLU, but with an upper limit of 6: inputs greater than 6 are clipped to 6.
    It is computed element-wise as

    .. math::
        \min(\max(0, x), 6).

    The input is a Tensor of any valid shape.

    Inputs:
        - **input_data** (Tensor) - The input of ReLU6 with data type of float16 or float32.

    Outputs:
        Tensor, which has the same type as `input_data`.

    Raises:
        TypeError: If dtype of `input_data` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> relu6 = nn.ReLU6()
        >>> output = relu6(input_x)
        >>> print(output)
        [0. 0. 0. 2. 1.]
    """

    def __init__(self):
        super(ReLU6, self).__init__()
        self.relu6 = P.ReLU6()

    def construct(self, x):
        return self.relu6(x)


class LeakyReLU(Cell):
    r"""
    Leaky ReLU activation function.

    LeakyReLU is similar to ReLU, but it has a non-zero slope for x < 0, so its output is not clamped to 0 there.
    The activation function is defined as:

    .. math::
        \text{leaky_relu}(x) = \begin{cases}x, &\text{if } x \geq 0; \cr
        \text{alpha} * x, &\text{otherwise.}\end{cases}

    See https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf

    Args:
        alpha (Union[int, float]): Slope of the activation function at x < 0. Default: 0.2.

    Inputs:
        - **input_x** (Tensor) - The input of LeakyReLU.

    Outputs:
        Tensor, has the same type and shape as the `input_x`.

    Raises:
        TypeError: If `alpha` is not a float or an int.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> leaky_relu = nn.LeakyReLU()
        >>> output = leaky_relu(input_x)
        >>> print(output)
        [[-0.2  4.  -1.6]
         [ 2.  -1.   9. ]]
    """

    def __init__(self, alpha=0.2):
        super(LeakyReLU, self).__init__()
        validator.check_value_type('alpha', alpha, [float, int], self.cls_name)
        self.greater_equal = P.GreaterEqual()
        self.mul = P.Mul()
        self.alpha = alpha

    def construct(self, x):
        alpha_array = P.Cast()(F.scalar_to_array(self.alpha), P.DType()(x))
        if self.alpha <= 1:
            # For alpha <= 1, alpha * x >= x only when x < 0, so max keeps x for x >= 0 and alpha * x otherwise.
            out = P.Maximum()(alpha_array * x, x)
        else:
            # For alpha > 1 the inequality flips, so min keeps x for x >= 0 and alpha * x otherwise.
            out = P.Minimum()(alpha_array * x, x)
        return out


class Tanh(Cell):
    r"""
    Tanh activation function.

    Applies the Tanh function element-wise, returning a new tensor with the hyperbolic tangent of the elements
    of the input. The input is a Tensor with any valid shape.

    Tanh function is defined as:

    .. math::
        tanh(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1},

    where :math:`x_i` is an element of the input Tensor.

    Inputs:
        - **input_data** (Tensor) - The input of Tanh with data type of float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If dtype of `input_data` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.array([1, 2, 3, 2, 1]), mindspore.float16)
        >>> tanh = nn.Tanh()
        >>> output = tanh(input_x)
        >>> print(output)
        [0.7617 0.964 0.995 0.964 0.7617]
    """

    def __init__(self):
        super(Tanh, self).__init__()
        self.tanh = _selected_ops.Tanh()

    def construct(self, x):
        return self.tanh(x)


class GELU(Cell):
    r"""
    Gaussian error linear unit activation function.

    Applies the GELU function to each element of the input. The input is a Tensor with any valid shape.

    GELU is defined as:

    .. math::
        GELU(x_i) = x_i*P(X < x_i),

    where :math:`P` is the cumulative distribution function
    of the standard Gaussian distribution and :math:`x_i` is the element of the input.

    The picture about GELU looks like this `GELU <https://en.wikipedia.org/wiki/
    Activation_function#/media/File:Activation_gelu.png>`_.

    Inputs:
        - **input_data** (Tensor) - The input of GELU with data type of float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If dtype of `input_data` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> gelu = nn.GELU()
        >>> output = gelu(input_x)
        >>> print(output)
        [[-1.5880802e-01  3.9999299e+00 -3.1077917e-21]
         [ 1.9545976e+00 -2.2918017e-07  9.0000000e+00]]
    """

    def __init__(self):
        super(GELU, self).__init__()
        self.gelu = _selected_ops.Gelu()

    def construct(self, x):
        return self.gelu(x)


class FastGelu(Cell):
    r"""
    Fast Gaussian error linear unit activation function.

    Applies the FastGelu function to each element of the input. The input is a Tensor with any valid shape.

    FastGelu is defined as:

    .. math::
        FastGelu(x_i) = \frac {x_i} {1 + \exp(-1.702 * \left| x_i \right|)} *
        \exp(0.851 * (x_i - \left| x_i \right|)),

    where :math:`x_i` is the element of the input.

    Inputs:
        - **input_data** (Tensor) - The input of FastGelu with data type of float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If dtype of `input_data` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
        >>> fast_gelu = nn.FastGelu()
        >>> output = fast_gelu(input_x)
        >>> print(output)
        [[-1.5420423e-01  3.9955850e+00 -9.7664279e-06]
         [ 1.9356586e+00 -1.0070159e-03  8.9999981e+00]]
    """

    def __init__(self):
        super(FastGelu, self).__init__()
        self.fast_gelu = _selected_ops.FastGelu()

    def construct(self, x):
        return self.fast_gelu(x)


class Sigmoid(Cell):
    r"""
    Sigmoid activation function.

    Applies sigmoid-type activation element-wise.

    Sigmoid function is defined as:

    .. math::
        \text{sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)},

    where :math:`x_i` is the element of the input.

    The picture about Sigmoid looks like this `Sigmoid <https://en.wikipedia.org/wiki/
    Sigmoid_function#/media/File:Logistic-curve.svg>`_.

    Inputs:
        - **input_data** (Tensor) - The input of Sigmoid with data type of float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If dtype of `input_data` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> sigmoid = nn.Sigmoid()
        >>> output = sigmoid(input_x)
        >>> print(output)
        [0.2688 0.11914 0.5 0.881 0.7305 ]
    """

    def __init__(self):
        super(Sigmoid, self).__init__()
        self.sigmoid = P.Sigmoid()

    def construct(self, x):
        return self.sigmoid(x)


class PReLU(Cell):
    r"""
    PReLU activation function.

    Applies the PReLU function element-wise.

    PReLU is defined as:

    .. math::
        prelu(x_i)= \max(0, x_i) + w * \min(0, x_i),

    where :math:`x_i` is an element of a channel of the input.

    Here :math:`w` is a learnable parameter with a default initial value 0.25.
    Parameter :math:`w` has dimensionality of the argument channel. If called without argument
    channel, a single parameter :math:`w` will be shared across all channels.

    The picture about PReLU looks like this `PReLU <https://en.wikipedia.org/wiki/
    Activation_function#/media/File:Activation_prelu.svg>`_.

    Args:
        channel (int): The dimension of input. Default: 1.
        w (Union[float, list, Tensor]): The initial value of w. Default: 0.25.

    Inputs:
        - **input_data** (Tensor) - The input of PReLU with data type of float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If `channel` is not an int.
        TypeError: If `w` is not one of float, list, Tensor.
        TypeError: If dtype of `input_data` is neither float16 nor float32.
        ValueError: If `channel` is less than 1.
        ValueError: If length of shape of `input_data` is equal to 1.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> input_x = Tensor(np.array([[[[0.1, 0.6], [0.9, 0.9]]]]), mindspore.float32)
        >>> prelu = nn.PReLU()
        >>> output = prelu(input_x)
        >>> print(output)
        [[[[0.1 0.6]
           [0.9 0.9]]]]
    """

    @cell_attr_register(attrs="")
    def __init__(self, channel=1, w=0.25):
        super(PReLU, self).__init__()
        validator.check_positive_int(channel, 'channel', self.cls_name)
        if isinstance(w, (np.float32, float)):
            tmp = np.empty((channel,), dtype=np.float32)
            tmp.fill(w)
            w = Tensor(tmp)
        elif isinstance(w, list):
            w = Tensor(w)
        if not isinstance(w, Tensor):
            raise TypeError("w only support np.float32, float, list or Tensor type.")
        self.w = Parameter(initializer(w, [channel]), name='a')
        self.prelu = P.PReLU()
        self.relu = P.ReLU()
        self.assign = P.Assign()

    def construct(self, x):
        # Clamp the learnable slope to be non-negative before applying PReLU,
        # and write the clamped value back into the parameter during training.
        u = self.relu(self.w)
        v = self.prelu(x, u)
        if self.training:
            self.assign(self.w, u)
        return v


class HSwish(Cell):
    r"""
    Hard swish activation function.

    Applies hswish-type activation element-wise. The input is a Tensor with any valid shape.

    Hard swish is defined as:

    .. math::
        \text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6},

    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

    Inputs:
        - **input_data** (Tensor) - The input of HSwish, data type must be float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If dtype of `input_data` is neither float16 nor float32.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> hswish = nn.HSwish()
        >>> result = hswish(input_x)
        >>> print(result)
        [-0.3333 -0.3333 0 1.666 0.6665]
    """

    def __init__(self):
        super(HSwish, self).__init__()
        self.hswish = P.HSwish()

    def construct(self, x):
        return self.hswish(x)


class HSigmoid(Cell):
    r"""
    Hard sigmoid activation function.

    Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.

    Hard sigmoid is defined as:

    .. math::
        \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6})),

    where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.

    Inputs:
        - **input_data** (Tensor) - The input of HSigmoid, data type must be float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If dtype of `input_data` is neither float16 nor float32.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
        >>> hsigmoid = nn.HSigmoid()
        >>> result = hsigmoid(input_x)
        >>> print(result)
        [0.3333 0.1666 0.5 0.833 0.6665]
    """

    def __init__(self):
        super(HSigmoid, self).__init__()
        self.hsigmoid = P.HSigmoid()

    def construct(self, x):
        return self.hsigmoid(x)


class LogSigmoid(Cell):
    r"""
    Logsigmoid activation function.

    Applies logsigmoid activation element-wise. The input is a Tensor with any valid shape.

    Logsigmoid is defined as:

    .. math::
        \text{logsigmoid}(x_{i}) = \log\left(\frac{1}{1 + \exp(-x_i)}\right),

    where :math:`x_{i}` is the element of the input.

    Inputs:
        - **input_data** (Tensor) - The input of LogSigmoid with data type of float16 or float32.

    Outputs:
        Tensor, with the same type and shape as the `input_data`.

    Raises:
        TypeError: If dtype of `input_data` is neither float16 nor float32.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> net = nn.LogSigmoid()
        >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
        >>> output = net(input_x)
        >>> print(output)
        [-0.31326166 -0.12692806 -0.04858734]
    """

    def __init__(self):
        super(LogSigmoid, self).__init__()
        self.mul = P.Mul()
        self.exp = P.Exp()
        self.add = P.Add()
        self.rec = P.Reciprocal()
        self.log = P.Log()

    def construct(self, input_x):
        # Computes log(1 / (1 + exp(-x))), composed from elementary ops.
        neg_input = self.mul(input_x, -1)
        exp_neg_input = self.exp(neg_input)
        exp_neg_input_1 = self.add(exp_neg_input, 1)
        rec_exp_neg_input_1 = self.rec(exp_neg_input_1)
        ret = self.log(rec_exp_neg_input_1)
        return ret


_activation = {
    'softmax': Softmax,
    'logsoftmax': LogSoftmax,
    'relu': ReLU,
    'relu6': ReLU6,
    'tanh': Tanh,
    'gelu': GELU,
    'fast_gelu': FastGelu,
    'elu': ELU,
    'sigmoid': Sigmoid,
    'prelu': PReLU,
    'leakyrelu': LeakyReLU,
    'hswish': HSwish,
    'hsigmoid': HSigmoid,
    'logsigmoid': LogSigmoid,
}


def get_activation(name):
    """
    Gets the activation function.

    Args:
        name (str): The name of the activation function.

    Returns:
        Function, the activation function.

    Examples:
        >>> sigmoid = nn.get_activation('sigmoid')
    """
    if name is None:
        return None

    if name not in _activation:
        raise KeyError(f"Unknown activation type '{name}'")
    return _activation[name]()
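
Since `get_activation` returns an instantiated Cell, it is typically used to make the activation of a larger network configurable by name rather than hard-coded. Below is a minimal sketch of that pattern, not part of this module: the `TinyNet` class and its layer sizes are invented for illustration, and it assumes MindSpore is installed with `mindspore.nn` re-exporting `Dense`, `Cell`, and `get_activation`.

import numpy as np
import mindspore.nn as nn
from mindspore import Tensor


class TinyNet(nn.Cell):
    """A toy two-layer network whose activation is selected by name."""

    def __init__(self, activation='relu'):
        super(TinyNet, self).__init__()
        self.fc1 = nn.Dense(4, 8)                  # hypothetical layer sizes, for illustration only
        self.act = nn.get_activation(activation)   # returns an instantiated Cell, e.g. nn.ReLU()
        self.fc2 = nn.Dense(8, 2)

    def construct(self, x):
        return self.fc2(self.act(self.fc1(x)))


# Usage: swap the activation by passing a different key from the _activation table, e.g. 'gelu'.
net = TinyNet('gelu')
x = Tensor(np.random.randn(3, 4).astype(np.float32))
print(net(x).shape)  # expected: (3, 2)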