You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

math_ops.py 96 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """math operations, the function docs are adapted from Numpy API."""
  16. import operator
  17. import functools
  18. from ..ops import operations as P
  19. from ..ops import functional as F
  20. from ..ops import composite as C
  21. from ..ops.primitive import constexpr
  22. from ..common import dtype as mstype
  23. from ..common import Tensor
  24. from .dtypes import nan, pi
  25. from .array_creations import asarray_const, ones, zeros, empty, full, full_like
  26. from .array_ops import where as where_
  27. from .array_ops import ravel, expand_dims
  28. from .utils_const import _infer_out_shape, _check_axis_valid, _get_device, \
  29. _check_shape_aligned, _raise_type_error, _check_same_type, _check_is_float, \
  30. _raise_value_error, _check_matmul_shapes, _promote, _check_axis_type, _canonicalize_axis, \
  31. _max, _is_shape_empty, _check_is_int, _expanded_shape, _check_axis_in_range
  32. from .utils import _is_scalar, _expand, _broadcast_to, _broadcast_to_shape, _get_size, \
  33. _check_input_tensor
# Rank-0 tensor holding 0; returned for reductions over empty inputs (see count_nonzero).
ZERO_TENSOR = asarray_const(0)
# Pre-instantiated primitive operators, shared by the functions in this module
# so each call reuses one primitive instead of constructing a new one.
_mean_default = P.ReduceMean()            # mean, reduced dims dropped
_mean_keepdims = P.ReduceMean(True)       # mean, reduced dims kept as size 1
_matmul = P.MatMul(False, False)          # plain matmul, no transposes
_matmul_T = P.MatMul(False, True)         # presumably transposes the second operand — confirm P.MatMul arg order
_reduce_sum_default = P.ReduceSum()
_reduce_sum_keepdims = P.ReduceSum(True)
_reduce_min_default = P.ReduceMin()
_reduce_min_keepdims = P.ReduceMin(True)
_reduce_max_default = P.ReduceMax()
_reduce_max_keepdims = P.ReduceMax(True)
_cumsum_default = P.CumSum()
  46. def absolute(x, out=None, where=True, dtype=None):
  47. """
  48. Calculates the absolute value element-wise.
  49. Note:
  50. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  51. not supported.
  52. When `where` is provided, `out` must have a tensor value. `out` is not supported
  53. for storing the result, however it can be used in combination with `where` to set
  54. the value at indices for which `where` is set to False.
  55. Currently the backend kernel only supports float calculation, if the input
  56. is not a `float`, then it will be casted to :class:`mstype.float32` and casted back.
  57. Args:
  58. x (Tensor): Tensor to be used for calculation.
  59. out (Tensor or None, optional): defaults to None.
  60. where (Tensor or None, optional): For any non-default value of type other
  61. than :class:`Tensor` or :class:`None`, the output retains its original value.
  62. This condition is broadcasted over the input. At locations where the
  63. condition is `True`, the out array will be set to the ufunc result.
  64. Elsewhere, the out array will retain its original value. Note that
  65. if an uninitialized out array is created via the default ``out=None``,
  66. locations within it where the condition is `False` will remain
  67. uninitialized.
  68. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  69. output Tensor.
  70. Returns:
  71. Tensor.
  72. Raises:
  73. TypeError: If input arguments have types not specified above.
  74. Supported Platforms:
  75. ``Ascend`` ``GPU`` ``CPU``
  76. Examples:
  77. >>> import mindspore.numpy as np
  78. >>> x = np.asarray([1, 2, 3, -4, -5], np.float32)
  79. >>> output = np.absolute(x)
  80. >>> print(output)
  81. [1. 2. 3. 4. 5.]
  82. """
  83. original_dtype = x.dtype
  84. if not _check_is_float(original_dtype) and dtype is None:
  85. x = x.astype(mstype.float32)
  86. return _apply_tensor_op(F.absolute, x, out=out, where=where, dtype=dtype).astype(original_dtype)
  87. return _apply_tensor_op(F.absolute, x, out=out, where=where, dtype=dtype)
  88. def count_nonzero(x, axis=None, keepdims=False):
  89. """
  90. Counts the number of non-zero values in the tensor `x`.
  91. Args:
  92. x (Tensor): The tensor for which to count non-zeros.
  93. axis (Union[int,tuple], optional): Axis or tuple of axes along which to
  94. count non-zeros. Default is None, meaning that non-zeros will be counted
  95. along a flattened version of `x`.
  96. keepdims (bool, optional): If this is set to True, the axes that are counted
  97. are left in the result as dimensions with size one. With this option,
  98. the result will broadcast correctly against `x`.
  99. Returns:
  100. Tensor, indicating number of non-zero values in the `x` along a given axis.
  101. Otherwise, the total number of non-zero values in `x` is returned.
  102. Raises:
  103. TypeError: if the input is not a tensor.
  104. Supported Platforms:
  105. ``Ascend`` ``GPU`` ``CPU``
  106. Examples:
  107. >>> import mindspore.numpy as np
  108. >>> x = np.asarray([1, 2, 3, -4, 0, 3, 2, 0])
  109. >>> output = np.count_nonzero(x)
  110. >>> print(output)
  111. 6
  112. """
  113. if _is_shape_empty(x.shape):
  114. return ZERO_TENSOR
  115. if axis is None:
  116. axis = ()
  117. return C.count_nonzero(x=x, axis=axis, keep_dims=keepdims)
  118. def clip(x, xmin, xmax, out=None, where=True, dtype=None):
  119. """
  120. Clips (limits) the values in an array.
  121. Given an interval, values outside the interval are clipped to the interval edges.
  122. For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
  123. and values larger than 1 become 1.
  124. Args:
  125. x (Tensor): Tensor containing elements to clip.
  126. xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
  127. on lower interval edge. Not more than one of `xmin` and `xmax` may be None.
  128. xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
  129. on upper interval edge. Not more than one of `xmin` and `xmax` may be None.
  130. If `xmin` or `xmax` are tensors, then the three tensors will be broadcasted
  131. to match their shapes.
  132. out (Tensor or None): optional, default to None.
  133. where (Tensor or None, optional): For any non-default value of type other
  134. than :class:`Tensor` or :class:`None`, the output retains its original value.
  135. This condition is broadcasted over the input. At locations where the
  136. condition is `True`, the out array will be set to the ufunc result.
  137. Elsewhere, the out array will retain its original value. Note that
  138. if an uninitialized out array is created via the default ``out=None``,
  139. locations within it where the condition is `False` will remain
  140. uninitialized.
  141. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  142. output Tensor.
  143. Returns:
  144. Tensor, a tensor with the elements of `x`, but where values
  145. < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.
  146. Supported Platforms:
  147. ``Ascend`` ``GPU`` ``CPU``
  148. Examples:
  149. >>> import mindspore.numpy as np
  150. >>> x = np.asarray([1, 2, 3, -4, 0, 3, 2, 0])
  151. >>> output = np.clip(x, 0, 2)
  152. >>> print(output)
  153. [1 2 2 0 0 2 2 0]
  154. """
  155. if xmin is None and xmax is None:
  156. _raise_value_error("One of max or min must be given.")
  157. if xmin is not None:
  158. x = maximum(x, xmin, out=out, where=where, dtype=dtype)
  159. if xmax is not None:
  160. x = minimum(x, xmax, out=out, where=where, dtype=dtype)
  161. return x
  162. def deg2rad(x, out=None, where=True, dtype=None):
  163. """
  164. Converts angles from degrees to radians.
  165. Args:
  166. x (Tensor): Angles in degrees.
  167. out (Tensor or None, optional): defaults to None.
  168. where (Tensor or None, optional): For any non-default value of type other
  169. than :class:`Tensor` or :class:`None`, the output retains its original value.
  170. This condition is broadcasted over the input. At locations where the
  171. condition is `True`, the out array will be set to the ufunc result.
  172. Elsewhere, the out array will retain its original value. Note that
  173. if an uninitialized out array is created via the default ``out=None``,
  174. locations within it where the condition is `False` will remain
  175. uninitialized.
  176. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  177. output Tensor.
  178. Returns:
  179. Tensor, the corresponding angle in radians. This is a tensor scalar if `x`
  180. is a tensor scalar.
  181. Raises:
  182. TypeError: if `x` is not a tensor.
  183. Supported Platforms:
  184. ``Ascend`` ``GPU`` ``CPU``
  185. Examples:
  186. >>> import mindspore.numpy as np
  187. >>> x = np.asarray([1, 2, 3, -4, -5])
  188. >>> output = np.deg2rad(x)
  189. >>> print(output)
  190. [ 0.01745329 0.03490658 0.05235988 -0.06981317 -0.08726647]
  191. """
  192. _check_input_tensor(x)
  193. def convert(a):
  194. return a * pi / 180.0
  195. return _apply_tensor_op(convert, x, out=out, where=where, dtype=dtype)
  196. def rad2deg(x, out=None, where=True, dtype=None):
  197. """
  198. Converts angles from radians to degrees.
  199. Args:
  200. x (Tensor): Angles in radians.
  201. out (Tensor or None, optional): defaults to None.
  202. where (Tensor or None, optional): For any non-default value of type other
  203. than :class:`Tensor` or :class:`None`, the output retains its original value.
  204. This condition is broadcasted over the input. At locations where the
  205. condition is `True`, the out array will be set to the ufunc result.
  206. Elsewhere, the out array will retain its original value. Note that
  207. if an uninitialized out array is created via the default ``out=None``,
  208. locations within it where the condition is `False` will remain
  209. uninitialized.
  210. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  211. output Tensor.
  212. Returns:
  213. Tensor, the corresponding angle in degrees. This is a tensor scalar if `x`
  214. is a tensor scalar.
  215. Raises:
  216. TypeError: if the input is not a tensor.
  217. Supported Platforms:
  218. ``Ascend`` ``GPU`` ``CPU``
  219. Examples:
  220. >>> x = np.asarray([1, 2, 3, -4, -5])
  221. >>> output = np.rad2deg(x)
  222. >>> print(output)
  223. [ 57.295776 114.59155 171.88733 -229.1831 -286.47888 ]
  224. """
  225. _check_input_tensor(x)
  226. def convert(a):
  227. return a * 180.0 / pi
  228. return _apply_tensor_op(convert, x, out=out, where=where, dtype=dtype)
  229. def add(x1, x2, out=None, where=True, dtype=None):
  230. """
  231. Adds arguments element-wise.
  232. Note:
  233. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  234. not supported.
  235. When `where` is provided, `out` must have a tensor value. `out` is not supported
  236. for storing the result, however it can be used in combination with `where` to set
  237. the value at indices for which `where` is set to False.
  238. Args:
  239. x1 (Tensor): input to be added.
  240. x2 (Tensor): input to be added.
  241. out (Tensor or None, optional): defaults to None.
  242. where (Tensor or None, optional): For any non-default value of type other
  243. than :class:`Tensor` or :class:`None`, the output retains its original value.
  244. This condition is broadcasted over the input. At locations where the
  245. condition is `True`, the out array will be set to the ufunc result.
  246. Elsewhere, the out array will retain its original value. Note that
  247. if an uninitialized out array is created via the default ``out=None``,
  248. locations within it where the condition is `False` will remain
  249. uninitialized.
  250. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  251. output Tensor.
  252. Returns:
  253. Tensor or scalar, the sum of `x1` and `x2`, element-wise. This is a scalar
  254. if both `x1` and `x2` are scalars.
  255. Raises:
  256. TypeError: if the input is not a tensor.
  257. Supported Platforms:
  258. ``Ascend`` ``GPU`` ``CPU``
  259. Examples:
  260. >>> x1 = np.full((3, 2), [1, 2])
  261. >>> x2 = np.full((3, 2), [3, 4])
  262. >>> output = np.add(x1, x2)
  263. >>> print(output)
  264. [[4, 6],
  265. [4, 6],
  266. [4, 6]]
  267. """
  268. # broadcast is not fully supported in tensor_add on CPU,
  269. # so we use tensor_sub as a substitute solution
  270. if _get_device() == 'CPU':
  271. _check_input_tensor(x1, x2)
  272. return subtract(x1, F.neg_tensor(x2), out=out, where=where, dtype=dtype)
  273. return _apply_tensor_op(F.tensor_add, x1, x2, out=out, where=where, dtype=dtype)
  274. def subtract(x1, x2, out=None, where=True, dtype=None):
  275. """
  276. Subtracts arguments, element-wise.
  277. Note:
  278. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  279. not supported.
  280. When `where` is provided, `out` must have a tensor value. `out` is not supported
  281. for storing the result, however it can be used in combination with `where` to set
  282. the value at indices for which `where` is set to False.
  283. Args:
  284. x1 (Tensor): the input to be subtracted from.
  285. x2 (Tensor): the input to be subtracted by.
  286. out (Tensor or None, optional): defaults to None.
  287. where (Tensor or None, optional): For any non-default value of type other
  288. than :class:`Tensor` or :class:`None`, the output retains its original value.
  289. This condition is broadcasted over the input. At locations where the
  290. condition is `True`, the out array will be set to the ufunc result.
  291. Elsewhere, the out array will retain its original value. Note that
  292. if an uninitialized out array is created via the default ``out=None``,
  293. locations within it where the condition is `False` will remain
  294. uninitialized.
  295. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  296. output Tensor.
  297. Returns:
  298. Tensor or scalar, the difference of `x1` and `x2`, element-wise. This is a
  299. scalar if both `x1` and `x2` are scalars.
  300. Raises:
  301. TypeError: if the input is not a tensor.
  302. Supported Platforms:
  303. ``Ascend`` ``GPU`` ``CPU``
  304. Examples:
  305. >>> x1 = np.full((3, 2), [1, 2])
  306. >>> x2 = np.full((3, 2), [3, 4])
  307. >>> output = np.subtract(x1, x2)
  308. >>> print(output)
  309. [[-2, -2],
  310. [-2, -2],
  311. [-2, -2]]
  312. """
  313. return _apply_tensor_op(F.tensor_sub, x1, x2, out=out, where=where, dtype=dtype)
  314. def multiply(x1, x2, out=None, where=True, dtype=None):
  315. """
  316. Multiplies arguments element-wise.
  317. Note:
  318. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  319. not supported.
  320. When `where` is provided, `out` must have a tensor value. `out` is not supported
  321. for storing the result, however it can be used in combination with `where` to set
  322. the value at indices for which `where` is set to False.
  323. Args:
  324. x1 (Tensor): input tensor to be multiplied.
  325. x2 (Tensor): input tensor to be multiplied.
  326. out (Tensor or None, optional): defaults to None.
  327. where (Tensor or None, optional): For any non-default value of type other
  328. than :class:`Tensor` or :class:`None`, the output retains its original value.
  329. This condition is broadcasted over the input. At locations where the
  330. condition is `True`, the out array will be set to the ufunc result.
  331. Elsewhere, the out array will retain its original value. Note that
  332. if an uninitialized out array is created via the default ``out=None``,
  333. locations within it where the condition is `False` will remain
  334. uninitialized.
  335. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  336. output Tensor.
  337. Returns:
  338. Tensor or scalar, the product of `x1` and `x2`, element-wise. This is a scalar
  339. if both `x1` and `x2` are scalars.
  340. Raises:
  341. TypeError: if the input is not a tensor.
  342. Supported Platforms:
  343. ``Ascend`` ``GPU`` ``CPU``
  344. Examples:
  345. >>> x1 = np.full((3, 2), [1, 2])
  346. >>> x2 = np.full((3, 2), [3, 4])
  347. >>> output = np.multiply(x1, x2)
  348. >>> print(output)
  349. [[3, 8],
  350. [3, 8],
  351. [3, 8]]
  352. """
  353. if _get_device() == 'CPU':
  354. _check_input_tensor(x1, x2)
  355. # broadcast is not fully supported on CPU backend,
  356. # and explicit broadcasting is performed
  357. shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
  358. x1 = _broadcast_to_shape(x1, shape_out)
  359. x2 = _broadcast_to_shape(x2, shape_out)
  360. return _apply_tensor_op(F.tensor_mul, x1, x2, out=out, where=where, dtype=dtype)
  361. def divide(x1, x2, out=None, where=True, dtype=None):
  362. """
  363. Returns a true division of the inputs, element-wise.
  364. Instead of the Python traditional ‘floor division’, this returns a true
  365. division.
  366. Note:
  367. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  368. not supported.
  369. When `where` is provided, `out` must have a tensor value. `out` is not supported
  370. for storing the result, however it can be used in combination with `where` to set
  371. the value at indices for which `where` is set to False.
  372. Args:
  373. x1 (Tensor): the divident.
  374. x2 (Tensor): the divisor.
  375. out (Tensor or None, optional): defaults to None.
  376. where (Tensor or None, optional): For any non-default value of type other
  377. than :class:`Tensor` or :class:`None`, the output retains its original value.
  378. This condition is broadcasted over the input. At locations where the
  379. condition is `True`, the out array will be set to the ufunc result.
  380. Elsewhere, the out array will retain its original value. Note that
  381. if an uninitialized out array is created via the default ``out=None``,
  382. locations within it where the condition is `False` will remain
  383. uninitialized.
  384. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  385. output Tensor.
  386. Returns:
  387. Tensor or scalar, this is a scalar if both `x1` and `x2` are scalars.
  388. Raises:
  389. TypeError: if the input is not a tensor.
  390. Supported Platforms:
  391. ``Ascend`` ``GPU`` ``CPU``
  392. Examples:
  393. >>> x1 = np.full((3, 2), [1, 2])
  394. >>> x2 = np.full((3, 2), [3, 4])
  395. >>> output = np.divide(x1, x2)
  396. >>> print(output)
  397. [[0.33333333, 0.5],
  398. [0.33333333, 0.5],
  399. [0.33333333, 0.5]]
  400. """
  401. if not _check_is_float(F.dtype(x1)) and not _check_is_float(F.dtype(x2)):
  402. x1 = F.cast(x1, mstype.float32)
  403. x2 = F.cast(x2, mstype.float32)
  404. return _apply_tensor_op(F.tensor_div, x1, x2, out=out, where=where, dtype=dtype)
  405. def true_divide(x1, x2, out=None, where=True, dtype=None):
  406. """
  407. Returns a true division of the inputs, element-wise.
  408. Instead of the Python traditional ‘floor division’, this returns a true
  409. division.
  410. Note:
  411. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  412. not supported.
  413. When `where` is provided, `out` must have a tensor value. `out` is not supported
  414. for storing the result, however it can be used in combination with `where` to set
  415. the value at indices for which `where` is set to False.
  416. Args:
  417. x1 (Tensor): the divident.
  418. x2 (Tensor): the divisor.
  419. out (Tensor or None, optional)
  420. where (Tensor, optional):
  421. This condition is broadcast over the input. At locations where the
  422. condition is True, the out array will be set to the ufunc result.
  423. Elsewhere, the out array will retain its original value. Note that
  424. if an uninitialized out array is created via the default out=None,
  425. locations within it where the condition is False will remain
  426. uninitialized.
  427. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  428. output Tensor.
  429. Returns:
  430. Tensor or scalar, this is a scalar if both `x1` and `x2` are scalars.
  431. Raises:
  432. TypeError: if the input is not a tensor.
  433. Supported Platforms:
  434. ``Ascend`` ``GPU`` ``CPU``
  435. Examples:
  436. >>> x1 = np.full((3, 2), [1, 2])
  437. >>> x2 = np.full((3, 2), [3, 4])
  438. >>> output = np.true_divide(x1, x2)
  439. >>> print(output)
  440. [[0.33333333, 0.5],
  441. [0.33333333, 0.5],
  442. [0.33333333, 0.5]]
  443. """
  444. return divide(x1, x2, out=out, where=where, dtype=dtype)
  445. def power(x1, x2, out=None, where=True, dtype=None):
  446. """
  447. First array elements raised to powers from second array, element-wise.
  448. Raises each base in `x1` to the positionally-corresponding power in `x2`.
  449. Note:
  450. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  451. not supported.
  452. When `where` is provided, `out` must have a tensor value. `out` is not supported
  453. for storing the result, however it can be used in combination with `where` to set
  454. the value at indices for which `where` is set to False.
  455. On GPU, the supported dtypes are np.float16, and np.float32.
  456. Args:
  457. x1 (Tensor): the bases.
  458. x2 (Tensor): the exponents.
  459. out (Tensor or None, optional): defaults to None.
  460. where (Tensor or None, optional): For any non-default value of type other
  461. than :class:`Tensor` or :class:`None`, the output retains its original value.
  462. This condition is broadcasted over the input. At locations where the
  463. condition is `True`, the out array will be set to the ufunc result.
  464. Elsewhere, the out array will retain its original value. Note that
  465. if an uninitialized out array is created via the default ``out=None``,
  466. locations within it where the condition is `False` will remain
  467. uninitialized.
  468. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  469. output Tensor.
  470. Returns:
  471. Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
  472. is a scalar if both `x1` and `x2` are scalars.
  473. Raises:
  474. TypeError: if the input is not a tensor.
  475. Supported Platforms:
  476. ``Ascend`` ``GPU`` ``CPU``
  477. Examples:
  478. >>> x1 = np.full((3, 2), [1, 2]).astype('float32')
  479. >>> x2 = np.full((3, 2), [3, 4]).astype('float32')
  480. >>> output = np.power(x1, x2)
  481. >>> print(output)
  482. [[ 1, 16],
  483. [ 1, 16],
  484. [ 1, 16]]
  485. """
  486. return _apply_tensor_op(F.tensor_pow, x1, x2, out=out, where=where, dtype=dtype)
  487. def float_power(x1, x2, out=None, where=True, dtype=None):
  488. """
  489. First array elements raised to powers from second array, element-wise.
  490. Raise each base in `x1` to the positionally-corresponding power in `x2`. `x1` and
  491. `x2` must be broadcastable to the same shape. This differs from the power
  492. function in that integers, float16, and float64 are promoted to floats with
  493. a minimum precision of float32 so that the result is always inexact. The
  494. intent is that the function will return a usable result for negative powers
  495. and seldom overflow for positive powers.
  496. Note:
  497. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  498. not supported.
  499. When `where` is provided, `out` must have a tensor value. `out` is not supported
  500. for storing the result, however it can be used in combination with `where` to set
  501. the value at indices for which `where` is set to False.
  502. Integers and floats are promoted to float32 instead of float64.
  503. Args:
  504. x1 (Tensor): the bases.
  505. x2 (Tensor): the exponenets.
  506. out (Tensor or None, optional): defaults to None.
  507. where (Tensor or None, optional): For any non-default value of type other
  508. than :class:`Tensor` or :class:`None`, the output retains its original value.
  509. This condition is broadcasted over the input. At locations where the
  510. condition is `True`, the out array will be set to the ufunc result.
  511. Elsewhere, the out array will retain its original value. Note that
  512. if an uninitialized out array is created via the default ``out=None``,
  513. locations within it where the condition is `False` will remain
  514. uninitialized.
  515. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  516. output Tensor.
  517. Returns:
  518. Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This
  519. is a scalar if both `x1` and `x2` are scalars.
  520. Raises:
  521. TypeError: if the input is not a tensor.
  522. Supported Platforms:
  523. ``Ascend`` ``GPU`` ``CPU``
  524. Examples:
  525. >>> x1 = np.arange(6)
  526. >>> x2 = np.array(3)
  527. >>> output = np.float_power(x1, x2)
  528. >>> print(output)
  529. [ 0. 1. 8. 27. 64. 125.]
  530. """
  531. if not _check_same_type(F.dtype(x1), mstype.float32):
  532. x1 = F.cast(x1, mstype.float32)
  533. if not _check_same_type(F.dtype(x2), mstype.float32):
  534. x2 = F.cast(x2, mstype.float32)
  535. return _apply_tensor_op(F.tensor_pow, x1, x2, out=out, where=where, dtype=dtype)
  536. def minimum(x1, x2, out=None, where=True, dtype=None):
  537. """
  538. Element-wise minimum of tensor elements.
  539. Compares two tensors and returns a new tensor containing the element-wise minima.
  540. Note:
  541. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  542. not supported.
  543. When `where` is provided, `out` must have a tensor value. `out` is not supported
  544. for storing the result, however it can be used in combination with `where` to set
  545. the value at indices for which `where` is set to False.
  546. Unlike numpy, when one of the elements is a NaN, the second element is
  547. always returned regardless of whether the second element is a NaN, instead
  548. of returning NaN.
  549. Args:
  550. x1 (Tensor): first input tensor to be compared.
  551. x2 (Tensor): second input tensor to be compared.
  552. out (Tensor or None, optional), default is None.
  553. where (Tensor or None, optional): For any non-default value of type other
  554. than :class:`Tensor` or :class:`None`, the output retains its original value.
  555. This condition is broadcasted over the input. At locations where the
  556. condition is `True`, the out array will be set to the ufunc result.
  557. Elsewhere, the out array will retain its original value. Note that
  558. if an uninitialized out array is created via the default ``out=None``,
  559. locations within it where the condition is `False` will remain
  560. uninitialized.
  561. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  562. output Tensor.
  563. Returns:
  564. Tensor, element-wise minimum of `x1` and `x2`.
  565. Raises:
  566. TypeError: If inputs have types not specified above.
  567. ValueError: If the shapes of `x1` and `x2` cannot be broadcast.
  568. Supported Platforms:
  569. ``Ascend`` ``GPU`` ``CPU``
  570. Examples:
  571. >>> import mindspore.numpy as np
  572. >>> a = np.asarray([1, 2])
  573. >>> b = np.asarray([[1, 3],[1, 4]])
  574. >>> print(np.minimum(a, b))
  575. [[1 2]
  576. [1 2]]
  577. """
  578. if isinstance(x1, (int, float, bool, list, tuple, Tensor)) and \
  579. isinstance(x2, (int, float, bool, list, tuple, Tensor)):
  580. x1 = asarray_const(x1)
  581. x2 = asarray_const(x2)
  582. else:
  583. _raise_type_error("Input x1 and x2 are expected to be array_like")
  584. # if both are scalars, expand x1 to 1d tensor, since cpu kernel doesn't support
  585. # comparisons with 2 scalars
  586. if x1.ndim == 0 and x2.ndim == 0:
  587. x1 = expand_dims(x1, 0)
  588. return _apply_tensor_op(F.minimum, x1, x2, out=out, where=where, dtype=dtype).squeeze()
  589. if x1.ndim == 0:
  590. dtype = x2.dtype
  591. elif x2.ndim == 0:
  592. dtype = x1.dtype
  593. return _apply_tensor_op(F.minimum, x1, x2, out=out, where=where, dtype=dtype)
def mean(a, axis=None, keepdims=False, dtype=None):
    """
    Computes the arithmetic mean along the specified axis.

    Returns the average of the array elements. The average is taken
    over the flattened array by default, otherwise over the specified
    axis.

    Note:
        Numpy arguments `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Tensor): input tensor containing numbers whose mean is desired.
            If a is not an array, a conversion is attempted.
        axis (None or int or tuple of ints, optional): Axis or axes along
            which the means are computed. The default is to compute
            the mean of the flattened array. If this is a tuple of
            ints, a mean is performed over multiple axes.
        keepdims (bool, optional): If this is set to True, the axes which
            are reduced are left in the result as dimensions with
            size one. With this option, the result will broadcast
            correctly against the input tensor.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, an array containing the mean values.

    Raises:
        ValueError: if axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(6, dtype='float32')
        >>> output = np.mean(a, 0)
        >>> print(output)
        2.5
    """
    # Validates/normalizes axis against the rank of `a` (raises on
    # out-of-range or duplicate axes).
    axis = _check_axis_valid(axis, F.rank(a))
    shape_a = F.shape(a)
    if dtype is None:
        dtype = F.dtype(a)
    if _is_shape_empty(shape_a):
        # Input has a zero-sized dimension: if the reduced output is itself
        # empty there is nothing to fill, otherwise there are no values to
        # average, so numpy-style nan is returned.
        if keepdims:
            shape_out = _shape_reduced_keepdims(shape_a, axis)
        else:
            shape_out = _shape_reduced(shape_a, axis)
        if _is_shape_empty(shape_out):
            return empty(F.dtype(a), shape_out)
        return full(shape_out, nan, dtype)
    if _is_scalar(shape_a):
        # 0-d input: the mean of a scalar is the scalar itself.
        if keepdims:
            return a
        shape_out = _shape_reduced(shape_a, axis)
        return F.reshape(a, shape_out)
    if keepdims:
        res = _mean_keepdims(a, axis)
    else:
        res = _mean_default(a, axis)
    # Cast only when the reduction result dtype differs from the requested one.
    if not _check_same_type(dtype, F.dtype(res)):
        res = F.cast(res, dtype)
    return res
  654. def inner(a, b):
  655. """
  656. Returns the inner product of two tensors.
  657. Ordinary inner product of vectors for 1-D tensors (without complex
  658. conjugation), in higher dimensions a sum product over the last
  659. axes.
  660. Note:
  661. Numpy argument `out` is not supported.
  662. On GPU, the supported dtypes are np.float16, and np.float32.
  663. On CPU, the supported dtypes are np.float16, np.float32, and
  664. np.float64.
  665. Args:
  666. a (Tensor): input tensor. If `a` and `b` are nonscalar, their last
  667. dimensions must match.
  668. b (Tensor): input tensor. If `a` and `b` are nonscalar, their last
  669. dimensions must match.
  670. Returns:
  671. Tensor or scalar.
  672. Raises:
  673. ValueError: if ``x1.shape[-1] != x2.shape[-1]``.
  674. Supported Platforms:
  675. ``Ascend`` ``GPU`` ``CPU``
  676. Examples:
  677. >>> import mindspore.numpy as np
  678. >>> a = np.ones((5, 3))
  679. >>> b = np.ones((2, 7, 3))
  680. >>> output = np.inner(a, b)
  681. >>> print(output)
  682. [[[3. 3. 3. 3. 3. 3. 3.]
  683. [3. 3. 3. 3. 3. 3. 3.]]
  684. [[3. 3. 3. 3. 3. 3. 3.]
  685. [3. 3. 3. 3. 3. 3. 3.]]
  686. [[3. 3. 3. 3. 3. 3. 3.]
  687. [3. 3. 3. 3. 3. 3. 3.]]
  688. [[3. 3. 3. 3. 3. 3. 3.]
  689. [3. 3. 3. 3. 3. 3. 3.]]
  690. [[3. 3. 3. 3. 3. 3. 3.]
  691. [3. 3. 3. 3. 3. 3. 3.]]]
  692. """
  693. if F.rank(a) == 0 or F.rank(b) == 0:
  694. return F.tensor_mul(a, b)
  695. _check_shape_aligned(F.shape(a), F.shape(b))
  696. aligned_shape_a = (F.shape_mul(F.shape(a)[:-1]), F.shape(a)[-1])
  697. aligned_shape_b = (F.shape_mul(F.shape(b)[:-1]), F.shape(a)[-1])
  698. a_aligned = F.reshape(a, aligned_shape_a)
  699. b_aligned = F.reshape(b, aligned_shape_b)
  700. res = _matmul_T(a_aligned, b_aligned)
  701. res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
  702. return res
  703. def dot(a, b):
  704. """
  705. Returns the dot product of two arrays.
  706. Specifically,
  707. If both `a` and `b` are 1-D arrays, it is inner product of vectors
  708. (without complex conjugation).
  709. If both `a` and `b` are 2-D arrays, it is matrix multiplication.
  710. If either `a` or `b` is 0-D (scalar), it is equivalent to multiply.
  711. If `a` is an `N-D` array and `b` is a 1-D array, it is a sum product
  712. over the last axis of `a` and `b`.
  713. If `a` is an `N-D` array and `b` is an `M-D` array (where ``M>=2``), it is a
  714. sum product over the last axis of `a` and the second-to-last axis of `b`:
  715. ``dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])``
  716. Note:
  717. Numpy argument `out` is not supported.
  718. On GPU, the supported dtypes are np.float16, and np.float32.
  719. On CPU, the supported dtypes are np.float16, np.float32, and
  720. np.float64.
  721. Args:
  722. a (Tensor): input tensor
  723. b (Tensor): input tensor
  724. Returns:
  725. Tensor or scalar, the dot product of `a` and `b`. If `a` and `b` are
  726. both scalars or both 1-D arrays then a scalar is returned;
  727. otherwise an array is returned
  728. Raises:
  729. ValueError: If the last dimension of `a` is not the same size
  730. as the second-to-last dimension of `b`.
  731. Supported Platforms:
  732. ``Ascend`` ``GPU`` ``CPU``
  733. Examples:
  734. >>> import mindspore.numpy as np
  735. >>> a = np.full((1, 3), 7).astype('float32')
  736. >>> b = np.full((2, 3, 4), 5).astype('float32')
  737. >>> output = np.dot(a, b)
  738. >>> print(output)
  739. [[[105, 105, 105, 105],
  740. [105, 105, 105, 105]]]
  741. """
  742. ndim_a, ndim_b = F.rank(a), F.rank(b)
  743. if ndim_a > 0 and ndim_b >= 2:
  744. perm = F.make_range(ndim_b)
  745. perm = perm[:-2] + (perm[-1],) + (perm[-2],)
  746. b = F.transpose(b, perm)
  747. return inner(a, b)
  748. def outer(a, b):
  749. """
  750. Computes the outer product of two vectors.
  751. Given two vectors, ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``,
  752. the outer product is:
  753. ``[[a0*b0 a0*b1 ... a0*bN ]``
  754. ``[a1*b0 . ]``
  755. ``[ ... . ]``
  756. ``[aM*b0 aM*bN ]]``
  757. Note:
  758. Numpy argument ``out`` is not supported.
  759. On GPU, the supported dtypes are np.float16, and np.float32.
  760. On CPU, the supported dtypes are np.float16, np.float32, and
  761. np.float64.
  762. Args:
  763. a (Tensor): first input vector. Input is flattened if not
  764. already 1-dimensional.
  765. b (Tensor): second input vector. Input is flattened if not
  766. already 1-dimensional.
  767. Returns:
  768. Tensor or scalar, ``out[i, j] = a[i] * b[j]``.
  769. Raises:
  770. TypeError: if the input is not a tensor.
  771. Supported Platforms:
  772. ``Ascend`` ``GPU`` ``CPU``
  773. Examples:
  774. >>> import mindspore.numpy as np
  775. >>> a = np.full(7, 2).astype('float32')
  776. >>> b = np.full(4, 3).astype('float32')
  777. >>> output = np.outer(a, b)
  778. >>> print(output)
  779. [[6, 6, 6, 6],
  780. [6, 6, 6, 6],
  781. [6, 6, 6, 6],
  782. [6, 6, 6, 6],
  783. [6, 6, 6, 6],
  784. [6, 6, 6, 6],
  785. [6, 6, 6, 6]]
  786. """
  787. _check_input_tensor(a, b)
  788. if F.rank(a) != 1:
  789. a = ravel(a)
  790. if F.rank(b) != 1:
  791. b = ravel(b)
  792. a = F.reshape(a, (F.shape(a)[0], 1))
  793. b = _expand(b, 2)
  794. return _matmul(a, b)
  795. def tensordot(a, b, axes=2):
  796. """
  797. Computes tensor dot product along specified axes.
  798. Given two tensors, `a` and `b`, and an array_like object containing two array_like
  799. objects, `(a_axes, b_axes)`, sum the products of `a`’s and `b`’s elements (components)
  800. over the axes specified by `a_axes` and `b_axes`. The third argument can be a single
  801. non-negative integer_like scalar, `N`; if it is such, then the last `N` dimensions of
  802. `a` and the first `N` dimensions of `b` are summed over.
  803. Three common use cases are:
  804. ``axes = 0`` : tensor product
  805. ``axes = 1`` : tensor dot product
  806. ``axes = 2`` : (default) tensor double contraction
  807. When axes is integer_like, the sequence for evaluation will be: first the `-Nth`
  808. axis in `a` and 0th axis in `b`, and the -1th axis in `a` and `Nth` axis in `b` last.
  809. When there is more than one axis to sum over - and they are not the last (first)
  810. axes of `a` `(b)` - the argument axes should consist of two sequences of the same
  811. length, with the first axis to sum over given first in both sequences, the second
  812. axis second, and so forth.
  813. The shape of the result consists of the non-contracted axes of the first tensor,
  814. followed by the non-contracted axes of the second.
  815. Note:
  816. On CPU, the supported dypes are np.float16 and np.float32.
  817. On GPU, the supported dypes are np.float16 and np.float32.
  818. Args:
  819. a (Tensor): Tensor to "dot".
  820. b (Tensor): Tensor to “dot”.
  821. axes (int or sequence of ints):
  822. integer_like: If an int `N`, sum over the last `N` axes of `a` and the first `N`
  823. axes of `b` in order. The sizes of the corresponding axes must match.
  824. sequence of ints: Or, a list of axes to be summed over, first sequence
  825. applying to `a`, second to `b`. Both elements `array_like` must be of the same
  826. length.
  827. Returns:
  828. Tensor, or list of tensors, the tensor dot product of the input.
  829. Supported Platforms:
  830. ``Ascend`` ``GPU`` ``CPU``
  831. Examples:
  832. >>> a = np.ones((3, 4, 5))
  833. >>> b = np.ones((4, 3, 2))
  834. >>> output = np.tensordot(a, b, axes=([1,0],[0,1]))
  835. >>> print(output.shape)
  836. (5, 2)
  837. """
  838. if F.rank(a)*F.rank(b) == 0 and axes == 0:
  839. return F.tensor_mul(a, b)
  840. return C.tensor_dot(a, b, axes)
def std(x, axis=None, ddof=0, keepdims=False):
    """
    Computes the standard deviation along the specified axis.

    The standard deviation is the square root of the average of the squared deviations
    from the mean, i.e., :math:`std = sqrt(mean(abs(x - x.mean())**2))`.

    Returns the standard deviation, which is computed for the flattened array by default,
    otherwise over the specified axis.

    Note:
        Numpy arguments `dtype` and `out` are not supported.

    Args:
        x (Tensor): A Tensor to be calculated.
        axis (Union[None, int, tuple(int)]): Axis or axes along which the standard
            deviation is computed. Default: `None`.
            If `None`, compute the standard deviation of the flattened array.
        ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is :math:`N - ddof`,
            where :math:`N` represents the number of elements. Default: 0.
        keepdims: Default: `False`.

    Returns:
        Standard deviation tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([1., 2., 3., 4.])
        >>> output = np.std(input_x)
        >>> print(output)
        1.118034
    """
    # Empty input has no deviation to compute; numpy-style nan scalar.
    if _is_shape_empty(x.shape):
        return full((), nan, F.dtype(x))
    if not isinstance(ddof, int):
        _raise_type_error("integer argument expected, but got ", ddof)
    # An empty tuple axis means "reduce over all axes" for the reduction helpers.
    if axis is None:
        axis = ()
    else:
        _check_axis_type(axis, True, True, False)
        axis = _canonicalize_axis(axis, x.ndim)
    # Mean is computed with kept dims so the subtraction below broadcasts
    # against the original shape of x.
    x_mean = _mean_keepdims(x, axis)
    x_sub = F.tensor_sub(x, x_mean)
    x_pow = F.tensor_pow(x_sub, 2)
    if keepdims:
        x_sum = _reduce_sum_keepdims(x_pow, axis)
    else:
        x_sum = _reduce_sum_default(x_pow, axis)
    # N = number of elements reduced; int axis after canonicalization indexes
    # a single dimension, otherwise the helper multiplies the reduced dims.
    if isinstance(axis, int):
        nums = x.shape[axis]
    else:
        nums = _get_size(x, axis)
    # sqrt(sum((x - mean)^2) / (N - ddof))
    x_std = F.tensor_pow(F.tensor_div(x_sum, nums - ddof), 0.5)
    return x_std
  891. def var(x, axis=None, ddof=0, keepdims=False):
  892. """
  893. Computes the variance along the specified axis.
  894. The variance is the average of the squared deviations from the mean, i.e.,
  895. :math:`var = mean(abs(x - x.mean())**2)`.
  896. Returns the variance, which is computed for the flattened array by default,
  897. otherwise over the specified axis.
  898. Note:
  899. Numpy arguments `dtype` and `out` are not supported.
  900. Args:
  901. x (Tensor): A Tensor to be calculated.
  902. axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
  903. The default is to compute the variance of the flattened array. Default: `None`.
  904. ddof (int): Means Delta Degrees of Freedom. Default: 0.
  905. The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
  906. keepdims (bool): Default: `False`.
  907. Supported Platforms:
  908. ``Ascend`` ``GPU`` ``CPU``
  909. Returns:
  910. Standard deviation tensor.
  911. Examples:
  912. >>> import mindspore.numpy as np
  913. >>> input_x = np.array([1., 2., 3., 4.])
  914. >>> output = np.var(input_x)
  915. >>> print(output)
  916. 1.25
  917. """
  918. if _is_shape_empty(x.shape):
  919. return full((), nan, F.dtype(x))
  920. x_std = std(x, axis, ddof, keepdims)
  921. return F.tensor_pow(x_std, 2)
  922. def ptp(x, axis=None, out=None, keepdims=False):
  923. """
  924. Range of values (maximum - minimum) along an axis.
  925. The name of the function comes from the acronym for ‘peak to peak’.
  926. Note:
  927. Numpy arguments `dtype` and `out` are not supported.
  928. Args:
  929. x (Tensor): Input tensor.
  930. axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
  931. The default is to compute the variance of the flattened array. Default: None.
  932. keepdims (bool): Default is False.
  933. Returns:
  934. Tensor.
  935. Raises:
  936. TypeError: if inputs have types not specified above.
  937. Supported Platforms:
  938. ``Ascend`` ``GPU`` ``CPU``
  939. Examples:
  940. >>> import mindspore.numpy as np
  941. >>> x = np.array([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]])
  942. >>> print(np.ptp(x, axis=1))
  943. [8. 6.]
  944. >>> print(np.ptp(x, axis=0))
  945. [2. 0. 5. 2.]
  946. """
  947. _check_input_tensor(x)
  948. if axis is None:
  949. axis = ()
  950. else:
  951. _check_axis_type(axis, True, True, False)
  952. axis = _canonicalize_axis(axis, x.ndim)
  953. if keepdims:
  954. x_min = _reduce_min_keepdims(x, axis)
  955. x_max = _reduce_max_keepdims(x, axis)
  956. else:
  957. x_min = _reduce_min_default(x, axis)
  958. x_max = _reduce_max_default(x, axis)
  959. return F.tensor_sub(x_max, x_min)
def average(x, axis=None, weights=None, returned=False):
    """
    Computes the weighted average along the specified axis.

    Args:
        x (Tensor): A Tensor to be averaged.
        axis (Union[None, int, tuple(int)]): Axis along which to average `x`. Default: `None`.
            If the axis is `None`, it will average over all of the elements of the tensor `x`.
            If the axis is negative, it counts from the last to the first axis.
        weights (Tensor): Weights associated with the values in `x`. Default: `None`.
            If `weights` is `None`, all the data in `x` are assumed to have a weight equal to one.
            If `weights` is 1-D tensor, the length must be the same as the given axis.
            Otherwise, `weights` should have the same shape as `x`.
        returned (bool): Default: `False`.
            If `True`, the tuple (average, sum_of_weights) is returned.
            If `False`, only the average is returned.

    Returns:
        Averaged Tensor. If returned is `True`, return tuple.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([[1., 2.], [3., 4.]])
        >>> output = np.average(input_x, axis=0, weights=input_x, returned=True)
        >>> print(output)
        (Tensor(shape=[2], dtype=Float32, value= [ 2.50000000e+00, 3.33333325e+00]),
        Tensor(shape=[2], dtype=Float32, value= [ 4.00000000e+00, 6.00000000e+00]))
    """
    # Normalize axis: None becomes the empty tuple (reduce over all axes).
    if axis is None:
        axis = ()
    else:
        _check_axis_type(axis, True, True, False)
        axis = _canonicalize_axis(axis, x.ndim)
    x_avg = full((), nan, F.dtype(x))
    sum_of_weights = None
    if weights is None:
        # Unweighted path: the average is the plain mean, and the weight sum
        # is the number of elements reduced.
        x_avg = mean(x, axis)
        # NOTE(review): axis can no longer be None here (it was converted to
        # () above), so this branch looks unreachable; the `axis == ()` case
        # below computes the same total element count — confirm before removal.
        if axis is None:
            sum_of_weights = full((), x.size, F.dtype(x))
        else:
            fill_value = 1
            if isinstance(axis, int) or isinstance(axis, tuple) and F.tuple_len(axis) == 1:
                fill_value = x.shape[axis]
            elif axis is None or axis == ():
                # Reduction over all axes: weight sum is the total element count.
                for sh in x.shape:
                    fill_value *= sh
            else:
                # Multi-axis reduction: product of the reduced dimension sizes.
                for ax in axis:
                    fill_value *= x.shape[ax]
            sum_of_weights = full_like(x_avg, fill_value, F.dtype(x))
    else:
        if x.shape == weights.shape:
            # Full weight tensor: weighted sum divided by sum of weights.
            x_avg, sum_of_weights = comput_avg(x, axis, weights)
        elif F.rank(weights) == 1:
            # 1-D weights apply along a single explicit axis; reshape them so
            # they broadcast against x along that axis.
            if not isinstance(axis, int):
                _raise_type_error("Axis must be specified when shapes of x and weights differ.")
            perm = _expanded_shape(x.ndim, weights.shape[0], axis)
            weights = weights.reshape(perm)
            x_avg, sum_of_weights = comput_avg(x, axis, weights)
        else:
            _raise_type_error("Weights should be None, 1-D or the same shape as input x.")
    if returned:
        # numpy returns sum_of_weights broadcast to the shape of the average.
        if x_avg.shape != sum_of_weights.shape:
            sum_of_weights = _broadcast_to(sum_of_weights, sum_of_weights.shape, x_avg.shape, x_avg.ndim)
        return (x_avg, sum_of_weights)
    return x_avg
  1025. def comput_avg(x, axis, weights):
  1026. """Computes average value of input x with given parameters."""
  1027. x_mul = F.tensor_mul(x, weights)
  1028. x_sum = _reduce_sum_default(x_mul, axis)
  1029. sum_of_weights = _reduce_sum_default(weights, axis)
  1030. x_avg = F.tensor_div(x_sum, sum_of_weights)
  1031. return x_avg, sum_of_weights
def matmul(x1, x2, dtype=None):
    """
    Returns the matrix product of two arrays.

    Note:
        Numpy arguments `out`, `casting`, `order`, `subok`, `signature`, and `extobj` are
        not supported.
        On GPU, the supported dtypes are np.float16 and np.float32.
        On CPU, the supported dtypes are np.float16 and np.float32.

    Args:
        x1 (Tensor): Input tensor, scalar not allowed.
        x2 (Tensor): Input tensor, scalar not allowed.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the matrix product of the inputs. This is a scalar only
        when both `x1`, `x2` are 1-d vectors.

    Raises:
        ValueError: If the last dimension of `x1` is not the same size as the
            second-to-last dimension of `x2`, or if a scalar value is passed in.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x1 = np.arange(2*3*4).reshape(2, 3, 4).astype('float32')
        >>> x2 = np.arange(4*5).reshape(4, 5).astype('float32')
        >>> output = np.matmul(x1, x2)
        >>> print(output)
        [[[  70.   76.   82.   88.   94.]
        [ 190.  212.  234.  256.  278.]
        [ 310.  348.  386.  424.  462.]]
        [[ 430.  484.  538.  592.  646.]
        [ 550.  620.  690.  760.  830.]
        [ 670.  756.  842.  928. 1014.]]]
    """
    # performs type promotion: both operands are cast to their common dtype.
    dtype1 = F.dtype(x1)
    dtype2 = F.dtype(x2)
    dtype_out = _promote(dtype1, dtype2)
    if not _check_same_type(dtype1, dtype_out):
        x1 = F.cast(x1, dtype_out)
    if not _check_same_type(dtype2, dtype_out):
        x2 = F.cast(x2, dtype_out)
    ndim1_orig, ndim2_orig = F.rank(x1), F.rank(x2)
    shape1_orig, shape2_orig = F.shape(x1), F.shape(x2)
    # Rejects scalars and mismatched inner dimensions.
    _check_matmul_shapes(shape1_orig, shape2_orig)
    ndim_aligned = _max(ndim1_orig, ndim2_orig)
    # A 1-d x2 is treated as a column vector: contract via transpose_b.
    transpose_b = ndim2_orig == 1
    # Broadcast the batch (leading) dimensions of both operands.
    shape_backbone = _infer_out_shape(
        shape1_orig[:-2], shape2_orig[:-2])
    # infers the shape of the output before any rank expansion happens below.
    shape_out = shape_backbone + _infer_shape_rem(shape1_orig, shape2_orig,
                                                  ndim1_orig, ndim2_orig, transpose_b)
    # Pad both operands to at least rank 2 so MatMul/BatchMatMul accept them.
    x1 = _expand(x1, _max(ndim_aligned, 2))
    x2 = _expand(x2, _max(ndim_aligned, 2))
    shape1_aligned, shape2_aligned = F.shape(x1), F.shape(x2)
    if ndim_aligned <= 2:
        res = P.MatMul(False, transpose_b)(x1, x2)
    else:
        # broadcasts x1.shape[:-2] with x2.shape[:-2] and uses BatchMatMul.
        shape_aligned = shape_backbone + _infer_shape_rem(shape1_aligned, shape2_aligned,
                                                          ndim_aligned, ndim_aligned,
                                                          transpose_b)
        x1 = _broadcast_to(x1, shape1_aligned[:-2], shape_aligned[:-2], ndim_aligned)
        x2 = _broadcast_to(x2, shape2_aligned[:-2], shape_aligned[:-2], ndim_aligned)
        res = P.BatchMatMul(False, transpose_b)(x1, x2)
    if dtype is not None and not _check_same_type(dtype_out, dtype):
        res = F.cast(res, dtype)
    # Reshape strips the padding added by _expand, restoring numpy semantics.
    return F.reshape(res, shape_out)
  1099. def square(x, out=None, where=True, dtype=None):
  1100. """
  1101. Returns the element-wise square of the input.
  1102. Note:
  1103. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1104. not supported.
  1105. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1106. for storing the result, however it can be used in combination with `where` to set
  1107. the value at indices for which `where` is set to False.
  1108. On GPU, the supported dtypes are np.float16 and np.float32.
  1109. Args:
  1110. x (Tensor): Input data.
  1111. out (Tensor or None, optional): defaults to None.
  1112. where (Tensor or None, optional): For any non-default value of type other
  1113. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1114. This condition is broadcasted over the input. At locations where the
  1115. condition is `True`, the out array will be set to the ufunc result.
  1116. Elsewhere, the out array will retain its original value. Note that
  1117. if an uninitialized out array is created via the default ``out=None``,
  1118. locations within it where the condition is `False` will remain
  1119. uninitialized.
  1120. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1121. output Tensor.
  1122. Returns:
  1123. Tensor or scalar, element-wise ``x*x``, of the same shape and dtype as `x`.
  1124. This is a scalar if `x` is a scalar..
  1125. Raises:
  1126. TypeError: if the input is not a tensor.
  1127. Supported Platforms:
  1128. ``Ascend`` ``GPU`` ``CPU``
  1129. Examples:
  1130. >>> x = np.square(np.arange(6).reshape(2, 3).astype('float32'))
  1131. >>> print(x)
  1132. [[ 0. 1. 4.]
  1133. [ 9. 16. 25.]]
  1134. """
  1135. return _apply_tensor_op(F.square, x, out=out, where=where, dtype=dtype)
  1136. def sqrt(x, out=None, where=True, dtype=None):
  1137. """
  1138. Returns the non-negative square-root of an array, element-wise.
  1139. Note:
  1140. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1141. not supported.
  1142. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1143. for storing the result, however it can be used in combination with `where` to set
  1144. the value at indices for which `where` is set to False.
  1145. On GPU, the supported dtypes are np.float16 and np.float32.
  1146. Args:
  1147. x (Tensor): The values whose square-roots are required.
  1148. out (Tensor or None, optional): defaults to None.
  1149. where (Tensor or None, optional): For any non-default value of type other
  1150. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1151. This condition is broadcasted over the input. At locations where the
  1152. condition is `True`, the out array will be set to the ufunc result.
  1153. Elsewhere, the out array will retain its original value. Note that
  1154. if an uninitialized out array is created via the default ``out=None``,
  1155. locations within it where the condition is `False` will remain
  1156. uninitialized.
  1157. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1158. output Tensor.
  1159. Returns:
  1160. Tensor or scalar, an array of the same shape as `x`, containing the positive
  1161. square-root of each element in `x`. For negative elements, nan is returned.
  1162. This is a scalar if `x` is a scalar.
  1163. Raises:
  1164. TypeError: if the input is not a tensor.
  1165. Supported Platforms:
  1166. ``Ascend`` ``GPU`` ``CPU``
  1167. Examples:
  1168. >>> x = np.arange(6).reshape(2, 3).astype('float32')
  1169. >>> x_squared = np.square(x)
  1170. >>> output = np.sqrt(x_squared)
  1171. >>> print(output)
  1172. [[ 0. 1. 2.]
  1173. [ 3. 4. 5.]]
  1174. """
  1175. return _apply_tensor_op(F.sqrt, x, out=out, where=where, dtype=dtype)
  1176. def reciprocal(x, out=None, where=True, dtype=None):
  1177. """
  1178. Returns the reciprocal of the argument, element-wise.
  1179. Calculates ``1/x``.
  1180. Note:
  1181. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1182. not supported.
  1183. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1184. for storing the result, however it can be used in combination with `where` to set
  1185. the value at indices for which `where` is set to False.
  1186. Args:
  1187. x (Tensor): Input array. For integer arguments with absolute value larger
  1188. than 1 the result is always zero because of the way Python handles
  1189. integer division. For integer zero the result is an overflow.
  1190. out (Tensor or None, optional): defaults to None.
  1191. where (Tensor or None, optional): For any non-default value of type other
  1192. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1193. This condition is broadcasted over the input. At locations where the
  1194. condition is `True`, the out array will be set to the ufunc result.
  1195. Elsewhere, the out array will retain its original value. Note that
  1196. if an uninitialized out array is created via the default ``out=None``,
  1197. locations within it where the condition is `False` will remain
  1198. uninitialized.
  1199. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1200. output Tensor.
  1201. Returns:
  1202. Tensor or scalar, this is a scalar if `x` is a scalar.
  1203. Raises:
  1204. TypeError: if the input is not a tensor.
  1205. Supported Platforms:
  1206. ``Ascend`` ``GPU`` ``CPU``
  1207. Examples:
  1208. >>> x = np.arange(1, 7).reshape(2, 3).astype('float32')
  1209. >>> output = np.reciprocal(x)
  1210. >>> print(output)
  1211. [[1. 0.5 0.33333334]
  1212. [0.25 0.2 0.16666667]]
  1213. """
  1214. return _apply_tensor_op(lambda x: F.tensor_div(1, x), x, out=out, where=where, dtype=dtype)
def log(x, out=None, where=True, dtype=None):
    """
    Returns the natural logarithm, element-wise.

    The natural logarithm log is the inverse of the exponential function, so that
    ``log(exp(x)) = x``. The natural logarithm is logarithm in base e.

    Note:
        Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
        not supported.
        When `where` is provided, `out` must have a tensor value. `out` is not supported
        for storing the result, however it can be used in combination with `where` to set
        the value at indices for which `where` is set to False.
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        x (Tensor): Input array. For integer arguments with absolute value larger
            than 1 the result is always zero because of the way Python handles
            integer division. For integer zero the result is an overflow.
        out (Tensor or None, optional): defaults to None.
        where (Tensor or None, optional): For any non-default value of type other
            than :class:`Tensor` or :class:`None`, the output retains its original value.
            This condition is broadcasted over the input. At locations where the
            condition is `True`, the out array will be set to the ufunc result.
            Elsewhere, the out array will retain its original value. Note that
            if an uninitialized out array is created via the default ``out=None``,
            locations within it where the condition is `False` will remain
            uninitialized.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, the natural logarithm of `x`, element-wise. This is a
        scalar if `x` is a scalar.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> x = np.array([3, 4, 5]).astype('float32')
        >>> output = np.log(x)
        >>> print(output)
        [1.09861 1.3862929 1.6094407]
    """
    return _apply_tensor_op(F.log, x, out=out, where=where, dtype=dtype)
  1257. def maximum(x1, x2, out=None, where=True, dtype=None):
  1258. """
  1259. Returns the element-wise maximum of array elements.
  1260. Compares two arrays and returns a new array containing the element-wise maxima.
  1261. Note:
  1262. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1263. not supported.
  1264. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1265. for storing the result, however it can be used in combination with `where` to set
  1266. the value at indices for which `where` is set to False.
  1267. Unlike numpy, when one of the elements is a NaN, the second element is
  1268. always returned regardless of whether the second element is a NaN, instead
  1269. of returning NaN.
  1270. Args:
  1271. x1 (Tensor): Input array
  1272. x2 (Tensor): The array holding the elements to be compared. If
  1273. ``x1.shape != x2.shape``, they must be broadcastable to a common shape
  1274. (which becomes the shape of the output).
  1275. out (Tensor or None, optional): defaults to None.
  1276. where (Tensor or None, optional): For any non-default value of type other
  1277. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1278. This condition is broadcasted over the input. At locations where the
  1279. condition is `True`, the out array will be set to the ufunc result.
  1280. Elsewhere, the out array will retain its original value. Note that
  1281. if an uninitialized out array is created via the default ``out=None``,
  1282. locations within it where the condition is `False` will remain
  1283. uninitialized.
  1284. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1285. output Tensor.
  1286. Returns:
  1287. Tensor or scalar, the maximum of `x1` and `x2`, element-wise. This is a scalar
  1288. if both `x1` and `x2` are scalars.
  1289. Raises:
  1290. TypeError: if the input is not a tensor.
  1291. Supported Platforms:
  1292. ``Ascend`` ``GPU`` ``CPU``
  1293. Examples:
  1294. >>> output = np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))
  1295. >>> print(output)
  1296. [2 5 4]
  1297. """
  1298. if isinstance(x1, (int, float, bool, list, tuple, Tensor)) and \
  1299. isinstance(x2, (int, float, bool, list, tuple, Tensor)):
  1300. x1 = asarray_const(x1)
  1301. x2 = asarray_const(x2)
  1302. else:
  1303. _raise_type_error("Input x1 and x2 are expected to be array_like")
  1304. # F.maximum does not support when both operands are scalar
  1305. if x1.ndim == 0 and x2.ndim == 0:
  1306. x1 = expand_dims(x1, 0)
  1307. return _apply_tensor_op(F.maximum, x1, x2, out=out, where=where, dtype=dtype).squeeze()
  1308. if x1.ndim == 0:
  1309. dtype = x2.dtype
  1310. elif x2.ndim == 0:
  1311. dtype = x1.dtype
  1312. return _apply_tensor_op(F.maximum, x1, x2, out=out, where=where, dtype=dtype)
  1313. def heaviside(x1, x2, out=None, where=True, dtype=None):
  1314. """
  1315. Computes the Heaviside step function.
  1316. Note:
  1317. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1318. not supported.
  1319. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1320. for storing the result, however it can be used in combination with `where` to set
  1321. the value at indices for which `where` is set to False.
  1322. Args:
  1323. x1 (Tensor): Input values.
  1324. x2 (Tensor): The value of the function when `x1` is 0. If
  1325. ``x1.shape != x2.shape``, they must be broadcastable to a common shape
  1326. (which becomes the shape of the output).
  1327. out (Tensor or None, optional): defaults to None.
  1328. where (Tensor or None, optional): For any non-default value of type other
  1329. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1330. This condition is broadcasted over the input. At locations where the
  1331. condition is `True`, the out array will be set to the ufunc result.
  1332. Elsewhere, the out array will retain its original value. Note that
  1333. if an uninitialized out array is created via the default ``out=None``,
  1334. locations within it where the condition is `False` will remain
  1335. uninitialized.
  1336. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1337. output Tensor.
  1338. Returns:
  1339. Tensor or scalar, the output array, element-wise Heaviside step function
  1340. of `x1`. This is a scalar if both `x1` and `x2` are scalars.
  1341. Raises:
  1342. TypeError: if the input is not a tensor.
  1343. Supported Platforms:
  1344. ``Ascend`` ``GPU`` ``CPU``
  1345. Examples:
  1346. >>> output = np.heaviside(np.array([-1.5, 0, 2.0]), np.array(0.5))
  1347. >>> print(output)
  1348. [0. 0.5 1. ]
  1349. >>> output = np.heaviside(np.array([-1.5, 0, 2.0]), np.array(1))
  1350. >>> print(output)
  1351. [0. 1. 1.]
  1352. """
  1353. def _heaviside(x1, x2):
  1354. """Computes heaviside without passing keyword arguments"""
  1355. # performs type promotion
  1356. dtype1 = F.dtype(x1)
  1357. dtype2 = F.dtype(x2)
  1358. dtype_out = _promote(dtype1, dtype2)
  1359. if not _check_same_type(dtype1, dtype_out):
  1360. x1 = F.cast(x1, dtype_out)
  1361. if not _check_same_type(dtype2, dtype_out):
  1362. x2 = F.cast(x2, dtype_out)
  1363. # performs broadcast
  1364. shape_out = _infer_out_shape(F.shape(x1), F.shape(x2))
  1365. x1 = _broadcast_to_shape(x1, shape_out)
  1366. x2 = _broadcast_to_shape(x2, shape_out)
  1367. x2 = F.select(x1 < 0, zeros(shape_out, dtype_out), x2)
  1368. x2 = F.select(x1 > 0, ones(shape_out, dtype_out), x2)
  1369. return x2
  1370. return _apply_tensor_op(_heaviside, x1, x2, out=out, where=where, dtype=dtype)
  1371. def amax(a, axis=None, keepdims=False, initial=None, where=True):
  1372. """
  1373. Returns the maximum of an array or maximum along an axis.
  1374. Note:
  1375. Numpy argument `out` is not supported.
  1376. On GPU, the supported dtypes are np.float16, and np.float32.
  1377. Args:
  1378. a (Tensor): Input data.
  1379. axis (None or int or tuple of ints, optional): defaults to None. Axis or
  1380. axes along which to operate. By default, flattened input is used. If
  1381. this is a tuple of ints, the maximum is selected over multiple axes,
  1382. instead of a single axis or all the axes as before.
  1383. keepdims (boolean, optional): defaults to False.
  1384. If this is set to True, the axes which are reduced are left in the
  1385. result as dimensions with size one. With this option, the result will
  1386. broadcast correctly against the input array.
  1387. initial (scalar, optional):
  1388. The minimum value of an output element. Must be present to allow
  1389. computation on empty slice.
  1390. where (boolean Tensor, optional): defaults to True.
  1391. A boolean array which is broadcasted to match the dimensions of array,
  1392. and selects elements to include in the reduction. If non-default value
  1393. is passed, initial must also be provided.
  1394. Returns:
  1395. Tensor or scalar, maximum of `a`. If `axis` is None, the result is a scalar
  1396. value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.
  1397. Raises:
  1398. TypeError: if the input is not a tensor.
  1399. Supported Platforms:
  1400. ``Ascend`` ``GPU`` ``CPU``
  1401. Examples:
  1402. >>> a = np.arange(4).reshape((2,2)).astype('float32')
  1403. >>> output = np.amax(a)
  1404. >>> print(output)
  1405. 3.0
  1406. >>> output = np.amax(a, axis=0)
  1407. >>> print(output)
  1408. [2. 3.]
  1409. >>> output = np.amax(a, axis=1)
  1410. >>> print(output)
  1411. [1. 3.]
  1412. >>> output = np.amax(a, where=np.array([False, True]), initial=-1, axis=0)
  1413. >>> print(output)
  1414. [-1. 3.]
  1415. """
  1416. return _reduce(a, P.ReduceMax(keepdims), F.maximum, axis=axis, keepdims=keepdims,
  1417. initial=initial, where=where)
def amin(a, axis=None, keepdims=False, initial=None, where=True):
    """
    Returns the minimum of an array or minimum along an axis.

    Note:
        Numpy argument `out` is not supported.
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        a (Tensor): Input data.
        axis (None or int or tuple of ints, optional): defaults to None. Axis or
            axes along which to operate. By default, flattened input is used. If
            this is a tuple of ints, the minimum is selected over multiple axes,
            instead of a single axis or all the axes as before.
        keepdims (boolean, optional): defaults to False.
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result will
            broadcast correctly against the input array.
        initial (scalar, optional):
            The maximum value of an output element. Must be present to allow
            computation on empty slice.
        where (boolean Tensor, optional): defaults to True.
            A boolean array which is broadcasted to match the dimensions of array,
            and selects elements to include in the reduction. If non-default value
            is passed, initial must also be provided.

    Returns:
        Tensor or scalar, minimum of `a`. If axis is None, the result is a scalar
        value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> a = np.arange(4).reshape((2,2)).astype('float32')
        >>> output = np.amin(a)
        >>> print(output)
        0.0
        >>> output = np.amin(a, axis=0)
        >>> print(output)
        [0. 1.]
        >>> output = np.amin(a, axis=1)
        >>> print(output)
        [0. 2.]
        >>> output = np.amin(a, where=np.array([False, True]), initial=10, axis=0)
        >>> print(output)
        [10. 1.]
    """
    return _reduce(a, P.ReduceMin(keepdims), F.minimum, axis=axis, keepdims=keepdims,
                   initial=initial, where=where)
  1465. def hypot(x1, x2, out=None, where=True, dtype=None):
  1466. """
  1467. Given the “legs” of a right triangle, returns its hypotenuse.
  1468. Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or `x2` is scalar_like
  1469. (i.e., unambiguously cast-able to a scalar type), it is broadcast for use
  1470. with each element of the other argument. (See Examples)
  1471. Note:
  1472. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1473. not supported.
  1474. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1475. for storing the result, however it can be used in combination with `where` to set
  1476. the value at indices for which `where` is set to False.
  1477. On GPU, the supported dtypes are np.float16 and np.float32.
  1478. On CPU, the supported dtypes are np.float16, np.float32, and np.float64.
  1479. Args:
  1480. x1 (Tensor): Leg of the traingle(s).
  1481. x2 (Tensor): Leg of the triangle(s). If ``x1.shape != x2.shape``, they
  1482. must be broadcastable to a common shape (which becomes the shape of
  1483. the output).
  1484. out (Tensor or None, optional): defaults to None.
  1485. where (Tensor or None, optional): For any non-default value of type other
  1486. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1487. This condition is broadcasted over the input. At locations where the
  1488. condition is `True`, the out array will be set to the ufunc result.
  1489. Elsewhere, the out array will retain its original value. Note that
  1490. if an uninitialized out array is created via the default ``out=None``,
  1491. locations within it where the condition is `False` will remain
  1492. uninitialized.
  1493. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1494. output Tensor.
  1495. Returns:
  1496. Tensor or scalar, the hypotenuse of the triangle(s). This is a scalar if
  1497. both `x1` and `x2` are scalars.
  1498. Raises:
  1499. TypeError: if the input is not a tensor.
  1500. Supported Platforms:
  1501. ``Ascend`` ``GPU`` ``CPU``
  1502. Examples:
  1503. >>> output = np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
  1504. >>> print(output)
  1505. [[5. 5. 5.]
  1506. [5. 5. 5.]
  1507. [5. 5. 5.]]
  1508. >>> output = np.hypot(3*np.ones((3, 3)), np.array([4]))
  1509. >>> print(output)
  1510. [[5. 5. 5.]
  1511. [5. 5. 5.]
  1512. [5. 5. 5.]]
  1513. """
  1514. def _hypot(x1, x2):
  1515. """Computes hypotenuse without passing keyword arguments"""
  1516. if _get_device() == 'CPU':
  1517. # broadcast is not fully supported in tensor_add on CPU,
  1518. # so we use tensor_sub as a substitute solution
  1519. return F.sqrt(F.tensor_sub(F.square(x1), F.neg_tensor(F.square(x2))))
  1520. return F.sqrt(F.tensor_add(F.square(x1), F.square(x2)))
  1521. return _apply_tensor_op(_hypot, x1, x2, out=out, where=where, dtype=dtype)
  1522. def floor(x, out=None, where=True, dtype=None):
  1523. """
  1524. Returns the floor of the input, element-wise.
  1525. The floor of the scalar `x` is the largest integer `i`, such that ``i <= x``.
  1526. Note:
  1527. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1528. not supported.
  1529. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1530. for storing the result, however it can be used in combination with `where` to set
  1531. the value at indices for which `where` is set to False.
  1532. On GPU, the supported dtypes are np.float16 and np.float32.
  1533. On CPU, the supported dtypes are np.float16, np.float32, and np.float64.
  1534. Args:
  1535. x (Tensor): input data.
  1536. out (Tensor or None, optional): defaults to None.
  1537. where (Tensor or None, optional): For any non-default value of type other
  1538. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1539. This condition is broadcasted over the input. At locations where the
  1540. condition is `True`, the out array will be set to the ufunc result.
  1541. Elsewhere, the out array will retain its original value. Note that
  1542. if an uninitialized out array is created via the default ``out=None``,
  1543. locations within it where the condition is `False` will remain
  1544. uninitialized.
  1545. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1546. output Tensor.
  1547. Returns:
  1548. Tensor or scalar, the floor of each element in `x`. This is a scalar if `x`
  1549. is a scalar.
  1550. Raises:
  1551. TypeError: if the input is not a tensor.
  1552. Supported Platforms:
  1553. ``Ascend`` ``GPU`` ``CPU``
  1554. Examples:
  1555. >>> output = np.floor(np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]))
  1556. >>> print(output)
  1557. [-2. -2. -1. 0. 1. 1. 2.]
  1558. """
  1559. return _apply_tensor_op(F.floor, x, out=out, where=where, dtype=dtype)
  1560. def floor_divide(x1, x2, out=None, where=True, dtype=None):
  1561. """
  1562. Returns the largest integer smaller or equal to the division of the inputs.
  1563. It is equivalent to the Python // operator and pairs with the
  1564. Python % (remainder), function so that ``a = a % b + b * (a // b)`` up to roundoff.
  1565. Note:
  1566. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1567. not supported.
  1568. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1569. for storing the result, however it can be used in combination with `where` to set
  1570. the value at indices for which `where` is set to False.
  1571. Args:
  1572. x1 (Tensor): Input array.
  1573. x2 (Tensor): Input array.
  1574. out (Tensor or None, optional): defaults to None.
  1575. where (Tensor or None, optional): For any non-default value of type other
  1576. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1577. This condition is broadcasted over the input. At locations where the
  1578. condition is `True`, the out array will be set to the ufunc result.
  1579. Elsewhere, the out array will retain its original value. Note that
  1580. if an uninitialized out array is created via the default ``out=None``,
  1581. locations within it where the condition is `False` will remain
  1582. uninitialized.
  1583. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1584. output Tensor.
  1585. Returns:
  1586. Tensor or scalar.
  1587. Raises:
  1588. TypeError: if the input is not a tensor.
  1589. Supported Platforms:
  1590. ``Ascend`` ``GPU`` ``CPU``
  1591. Examples:
  1592. >>> output = np.floor_divide(np.array([1., 2., 3., 4.]), np.array(2.5))
  1593. >>> print(output)
  1594. [0. 0. 1. 1.]
  1595. """
  1596. return _apply_tensor_op(F.tensor_floordiv, x1, x2, out=out, where=where, dtype=dtype)
def _remainder(x1, x2, C_style=False):
    """
    Computes remainder without applying keyword arguments.

    With ``C_style=False`` the quotient is floored, giving Python ``%``
    semantics (result has the sign of the divisor); with ``C_style=True``
    the quotient is rounded toward zero via ``fix``, giving C ``fmod``
    semantics (result has the sign of the dividend).
    """
    # promoted dtype of the two inputs; this is the dtype of the result
    dtype = _promote(F.dtype(x1), F.dtype(x2))
    if not _check_is_float(dtype):
        # do the arithmetic in float32 so division is exact enough,
        # then cast back to the promoted integer dtype at the end
        x1 = F.cast(x1, mstype.float32)
        x2 = F.cast(x2, mstype.float32)
    quotient = F.tensor_div(x1, x2)
    if C_style:
        quotient = fix(quotient)
    else:
        quotient = F.floor(quotient)
    # remainder = x1 - x2 * quotient
    prod = F.tensor_mul(x2, quotient)
    res = F.tensor_sub(x1, prod)
    if _check_is_int(dtype):
        # integer division by zero: force the result to 0 instead of the
        # nan/inf produced by the float computation above
        zeros_tensor = zeros(F.shape(quotient), F.dtype(quotient))
        x2_zeros = F.equal(x2, zeros_tensor)
        res = F.select(x2_zeros, zeros_tensor, res)
    if not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res
  1617. def remainder(x1, x2, out=None, where=True, dtype=None):
  1618. """
  1619. Returns element-wise remainder of division.
  1620. Computes the remainder complementary to the floor_divide function. It is
  1621. equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
  1622. as the divisor `x2`. The MATLAB function equivalent to np.remainder is mod.
  1623. Note:
  1624. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1625. not supported.
  1626. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1627. for storing the result, however it can be used in combination with `where` to set
  1628. the value at indices for which `where` is set to False.
  1629. Args:
  1630. x1 (Tensor): input array.
  1631. x2 (Tensor): input array.
  1632. out (Tensor or None, optional): defaults to None.
  1633. where (Tensor or None, optional): For any non-default value of type other
  1634. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1635. This condition is broadcasted over the input. At locations where the
  1636. condition is `True`, the out array will be set to the ufunc result.
  1637. Elsewhere, the out array will retain its original value. Note that
  1638. if an uninitialized out array is created via the default ``out=None``,
  1639. locations within it where the condition is `False` will remain
  1640. uninitialized.
  1641. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1642. output Tensor.
  1643. Returns:
  1644. Tensor or scalar, the element-wise remainder of the quotient
  1645. ``floor_divide(x1, x2)``. This is a scalar if both `x1` and `x2` are scalars.
  1646. Raises:
  1647. TypeError: if the input is not a tensor.
  1648. Supported Platforms:
  1649. ``Ascend`` ``GPU`` ``CPU``
  1650. Examples:
  1651. >>> output = np.remainder(np.array([4, 7]), np.array([2, 3]))
  1652. >>> print(output)
  1653. [0 1]
  1654. >>> output = np.remainder(np.arange(7), np.array(5))
  1655. >>> print(output)
  1656. [0 1 2 3 4 0 1]
  1657. """
  1658. return _apply_tensor_op(_remainder, x1, x2, out=out, where=where, dtype=dtype)
  1659. def fix(x):
  1660. """
  1661. Rounds to nearest integer towards zero.
  1662. Rounds an array of floats element-wise to nearest integer towards zero. The
  1663. rounded values are returned as floats.
  1664. Note:
  1665. Numpy argument `out` is not supported.
  1666. Args:
  1667. x (Tensor): An array of floats to be rounded.
  1668. Returns:
  1669. Tensor.
  1670. Raises:
  1671. TypeError: if the input is not a tensor.
  1672. Supported Platforms:
  1673. ``Ascend`` ``GPU`` ``CPU``
  1674. Examples:
  1675. >>> output = np.fix(np.array([2.1, 2.9, -2.1, -2.9]))
  1676. >>> print(output)
  1677. [ 2. 2. -2. -2.]
  1678. """
  1679. _check_input_tensor(x)
  1680. if not _check_is_float(F.dtype(x)):
  1681. x = F.cast(x, mstype.float32)
  1682. floored = F.floor(x)
  1683. # TODO change to F.ceil once supported on CPU.
  1684. ceiled = F.neg_tensor(F.floor(F.neg_tensor(x)))
  1685. is_neg = F.tensor_lt(x, zeros(F.shape(x), F.dtype(x)))
  1686. return F.select(is_neg, ceiled, floored)
  1687. def fmod(x1, x2, out=None, where=True, dtype=None):
  1688. """
  1689. Returns the element-wise remainder of division.
  1690. This is the NumPy implementation of the C library function fmod, the remainder
  1691. has the same sign as the dividend `x1`. It is equivalent to the Matlab(TM) rem
  1692. function and should not be confused with the Python modulus operator ``x1 % x2``.
  1693. Note:
  1694. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1695. not supported.
  1696. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1697. for storing the result, however it can be used in combination with `where` to set
  1698. the value at indices for which `where` is set to False.
  1699. Args:
  1700. x1 (Tensor)
  1701. x2 (Tensor): input arrays.
  1702. out (Tensor or None, optional): defaults to None.
  1703. where (Tensor or None, optional): For any non-default value of type other
  1704. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1705. This condition is broadcasted over the input. At locations where the
  1706. condition is `True`, the out array will be set to the ufunc result.
  1707. Elsewhere, the out array will retain its original value. Note that
  1708. if an uninitialized out array is created via the default ``out=None``,
  1709. locations within it where the condition is `False` will remain
  1710. uninitialized.
  1711. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1712. output Tensor.
  1713. Returns:
  1714. Tensor or scalar, the remainder of the division of `x1` by `x2`. This is a
  1715. scalar if both `x1` and `x2` are scalars.
  1716. Raises:
  1717. TypeError: if the input is not a tensor.
  1718. Supported Platforms:
  1719. ``Ascend`` ``GPU`` ``CPU``
  1720. Examples:
  1721. >>> output = np.fmod(np.array([-3, -2, -1, 1, 2, 3]), np.array(2))
  1722. >>> print(output)
  1723. [-1 0 -1 1 0 1]
  1724. """
  1725. return _apply_tensor_op(lambda x1, x2: _remainder(x1, x2, C_style=True), x1, x2,
  1726. out=out, where=where, dtype=dtype)
  1727. def trunc(x, out=None, where=True, dtype=None):
  1728. """
  1729. Returns the truncated value of the input, element-wise.
  1730. The truncated value of the scalar `x` is the nearest integer `i` which is closer to zero
  1731. than `x` is. In short, the fractional part of the signed number `x` is discarded.
  1732. Note:
  1733. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1734. not supported.
  1735. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1736. for storing the result, however it can be used in combination with `where` to set
  1737. the value at indices for which `where` is set to False.
  1738. Args:
  1739. x (Tensor): input data.
  1740. out (Tensor or None, optional): defaults to None.
  1741. where (Tensor or None, optional): For any non-default value of type other
  1742. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1743. This condition is broadcasted over the input. At locations where the
  1744. condition is `True`, the out array will be set to the ufunc result.
  1745. Elsewhere, the out array will retain its original value. Note that
  1746. if an uninitialized out array is created via the default ``out=None``,
  1747. locations within it where the condition is `False` will remain
  1748. uninitialized.
  1749. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1750. output Tensor.
  1751. Returns:
  1752. Tensor or scalar, the truncated value of each element in `x`. This is a scalar if `x` is
  1753. a scalar.
  1754. Raises:
  1755. TypeError: if the input is not a tensor.
  1756. Supported Platforms:
  1757. ``Ascend`` ``GPU`` ``CPU``
  1758. Examples:
  1759. >>> output = np.trunc(np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]))
  1760. >>> print(output)
  1761. [-1. -1. -0. 0. 1. 1. 2.]
  1762. """
  1763. return _apply_tensor_op(fix, x, out=out, where=where, dtype=dtype)
  1764. def exp(x, out=None, where=True, dtype=None):
  1765. """
  1766. Calculates the exponential of all elements in the input array.
  1767. Note:
  1768. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1769. not supported.
  1770. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1771. for storing the result, however it can be used in combination with `where` to set
  1772. the value at indices for which `where` is set to False.
  1773. On GPU, the supported dtypes are np.float16, and np.float32.
  1774. On CPU, the supported dtypes are np.float16, np.float32, np.float64.
  1775. Args:
  1776. x (Tensor): input data.
  1777. out (Tensor or None, optional): defaults to None.
  1778. where (Tensor or None, optional): For any non-default value of type other
  1779. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1780. This condition is broadcasted over the input. At locations where the
  1781. condition is `True`, the out array will be set to the ufunc result.
  1782. Elsewhere, the out array will retain its original value. Note that
  1783. if an uninitialized out array is created via the default ``out=None``,
  1784. locations within it where the condition is `False` will remain
  1785. uninitialized.
  1786. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1787. output Tensor.
  1788. Returns:
  1789. Tensor or scalar, element-wise exponential of `x`. This is a scalar if both
  1790. `x1` and `x2` are scalars.
  1791. Raises:
  1792. TypeError: if the input is not a tensor.
  1793. Supported Platforms:
  1794. ``Ascend`` ``GPU`` ``CPU``
  1795. Examples:
  1796. >>> output = np.exp(np.arange(5).astype(np.float32))
  1797. >>> print(output)
  1798. [ 1. 2.718282 7.3890557 20.085537 54.598145 ]
  1799. """
  1800. return _apply_tensor_op(F.tensor_exp, x, out=out, where=where, dtype=dtype)
  1801. def expm1(x, out=None, where=True, dtype=None):
  1802. """
  1803. Calculates ``exp(x) - 1`` for all elements in the array.
  1804. Note:
  1805. Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
  1806. not supported.
  1807. When `where` is provided, `out` must have a tensor value. `out` is not supported
  1808. for storing the result, however it can be used in combination with `where` to set
  1809. the value at indices for which `where` is set to False.
  1810. On GPU, the supported dtypes are np.float16, and np.float32.
  1811. On CPU, the supported dtypes are np.float16, and np.float32.
  1812. Args:
  1813. x (Tensor): input data.
  1814. out (Tensor or None, optional): defaults to None.
  1815. where (Tensor or None, optional): For any non-default value of type other
  1816. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1817. This condition is broadcasted over the input. At locations where the
  1818. condition is `True`, the out array will be set to the ufunc result.
  1819. Elsewhere, the out array will retain its original value. Note that
  1820. if an uninitialized out array is created via the default ``out=None``,
  1821. locations within it where the condition is `False` will remain
  1822. uninitialized.
  1823. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1824. output Tensor.
  1825. Returns:
  1826. Tensor or scalar, element-wise exponential minus one, ``out = exp(x) - 1``.
  1827. This is a scalar if both `x1` and `x2` are scalars.
  1828. Raises:
  1829. TypeError: if the input is not a tensor.
  1830. Supported Platforms:
  1831. ``Ascend`` ``GPU`` ``CPU``
  1832. Examples:
  1833. >>> output = np.expm1(np.arange(5).astype(np.float32))
  1834. >>> print(output)
  1835. [ 0. 1.7182819 6.389056 19.085537 53.59815 ]
  1836. """
  1837. return _apply_tensor_op(F.tensor_expm1, x, out=out, where=where, dtype=dtype)
  1838. @constexpr
  1839. def _real_axes(ndim_orig, ndim_out, axes_orig):
  1840. """Returns the real axes to be reduced after performing broadcast"""
  1841. diff = ndim_out - ndim_orig
  1842. axes = F.make_range(diff)
  1843. axes_orig = map(functools.partial(operator.add, diff), axes_orig)
  1844. return axes + tuple(axes_orig)
  1845. @constexpr
  1846. def _shape_reduced_keepdims(shape, axes):
  1847. """
  1848. Reduces dimensions corresponding to argument axes while
  1849. keeping the number of dimensions unchanged.
  1850. """
  1851. ndim_out = F.tuple_len(shape)
  1852. shape_out = [1]*ndim_out
  1853. for i in range(ndim_out):
  1854. if not i in axes:
  1855. shape_out[i] = shape[i]
  1856. return tuple(shape_out)
  1857. @constexpr
  1858. def _shape_reduced(shape, axes):
  1859. """Removes dimensions corresponding to argument axes"""
  1860. ndim_orig = F.tuple_len(shape)
  1861. ndim_out = ndim_orig - F.tuple_len(axes)
  1862. shape_out = [0]*ndim_out
  1863. idx_out = 0
  1864. for i in range(ndim_orig):
  1865. if not i in axes:
  1866. shape_out[idx_out] = shape[i]
  1867. idx_out += 1
  1868. return tuple(shape_out)
  1869. def _infer_shape_rem(shape1, shape2, ndim1, ndim2, transpose_b):
  1870. """Infers the shape of the last two dimensions after performing matmul."""
  1871. shape_rem = ()
  1872. if ndim1 >= 2:
  1873. shape_rem += (shape1[-2],)
  1874. if transpose_b:
  1875. if ndim2 >= 2:
  1876. shape_rem += (shape2[-2],)
  1877. else:
  1878. if ndim1 >= 1:
  1879. shape_rem += (shape2[-1],)
  1880. return shape_rem
def _reduce(a, reduce_fn, cmp_fn, axis=None, keepdims=False, initial=None, where=True):
    """
    Applies comparison based on cmp_fn and reduction based on reduce_fn.

    Args:
        a (Tensor): input tensor to be reduced.
        reduce_fn: reduction op applied over the resolved axes.
        cmp_fn: element-wise comparison used to fold `initial` (and the fill
            value for a `where` mask) into `a` before reducing.
        axis (None, int or tuple of ints): axes to reduce; None means all axes.
        keepdims (bool): only consulted for empty inputs, to pick the shape of
            the returned placeholder tensor.
        initial: scalar start value; mandatory for zero-size arrays and for
            tensor-valued `where` masks.
        where (bool or Tensor): mask of elements participating in the reduction.
    """
    _check_input_tensor(a)

    shape = F.shape(a)
    ndim = F.rank(a)
    dtype = F.dtype(a)
    # Normalizes `axis` into a validated tuple of non-negative axes.
    axes = _check_axis_valid(axis, ndim)

    # Zero-size input: no data to reduce, so the result is synthesized.
    if _is_shape_empty(shape):
        if not axes:
            return a
        if keepdims:
            shape_out = _shape_reduced_keepdims(shape, axes)
        else:
            shape_out = _shape_reduced(shape, axes)
        # Still empty after reducing: return an (uninitialized) empty tensor.
        if _is_shape_empty(shape_out):
            return empty(F.dtype(a), shape_out)
        if initial is None:
            return _raise_value_error('initial value must be provided for zero-size arrays')
        # Non-empty output from empty input: every element is `initial`.
        return full(shape_out, initial, dtype)

    if initial is not None:
        # Broadcast `initial` to the input shape and fold it in via cmp_fn so
        # the reduction effectively starts from `initial`.
        initial = full(shape, initial, dtype)
        a = cmp_fn(a, initial)

    if not axes:
        return a

    if isinstance(where, Tensor):
        # Masked-out positions are replaced by `initial`, which is neutral for
        # the comparison-based reduction; hence `initial` is required here.
        if initial is None:
            return _raise_value_error('initial value must be provided for where masks')
        ndim_orig = F.rank(a)
        a = where_(where, a, initial)
        # `where_` may broadcast `a` to more dimensions; remap the axes.
        axes = _real_axes(ndim_orig, F.rank(a), axes)

    return reduce_fn(a, axes)
  1912. def positive(a, out=None, where=True, dtype=None):
  1913. """
  1914. Numerical positive, element-wise.
  1915. Note:
  1916. Numpy arguments casting, order, subok, signature, and extobj are
  1917. not supported.
  1918. Args:
  1919. a (Tensor): Input tensor.
  1920. out (Tensor or None, optional): defaults to None.
  1921. where (Tensor or None, optional): For any non-default value of type other
  1922. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1923. This condition is broadcasted over the input. At locations where the
  1924. condition is `True`, the out array will be set to the ufunc result.
  1925. Elsewhere, the out array will retain its original value. Note that
  1926. if an uninitialized out array is created via the default ``out=None``,
  1927. locations within it where the condition is `False` will remain
  1928. uninitialized.
  1929. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1930. output Tensor.
  1931. Returns:
  1932. Tensor.
  1933. Supported Platforms:
  1934. ``Ascend`` ``GPU`` ``CPU``
  1935. Examples:
  1936. >>> import mindspore.numpy as np
  1937. >>> a = np.asarray([1, -1])
  1938. >>> output = np.positive(a)
  1939. >>> print(output)
  1940. [1, -1]
  1941. """
  1942. _check_input_tensor(a)
  1943. neg_tensor = F.neg_tensor(a)
  1944. return _apply_tensor_op(F.neg_tensor, neg_tensor, out=out, where=where, dtype=dtype)
  1945. def negative(a, out=None, where=True, dtype=None):
  1946. """
  1947. Numerical negative, element-wise.
  1948. Note:
  1949. Numpy arguments `casting`, `order`, `subok`, `signature`, and `extobj` are
  1950. not supported.
  1951. Args:
  1952. a (Tensor): Input tensor.
  1953. out (Tensor or None, optional): defaults to None.
  1954. where (Tensor or None, optional): For any non-default value of type other
  1955. than :class:`Tensor` or :class:`None`, the output retains its original value.
  1956. This condition is broadcasted over the input. At locations where the
  1957. condition is `True`, the out array will be set to the ufunc result.
  1958. Elsewhere, the out array will retain its original value. Note that
  1959. if an uninitialized out array is created via the default ``out=None``,
  1960. locations within it where the condition is `False` will remain
  1961. uninitialized.
  1962. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1963. output Tensor.
  1964. Returns:
  1965. Tensor.
  1966. Supported Platforms:
  1967. ``Ascend`` ``GPU`` ``CPU``
  1968. Examples:
  1969. >>> import mindspore.numpy as np
  1970. >>> a = np.asarray([1, -1])
  1971. >>> output = np.negative(a)
  1972. >>> print(output)
  1973. [-1, 1]
  1974. """
  1975. _check_input_tensor(a)
  1976. return _apply_tensor_op(F.neg_tensor, a, out=out, where=where, dtype=dtype)
def cumsum(a, axis=None, dtype=None):
    """
    Returns the cumulative sum of the elements along a given axis.

    Args:
        a (Tensor): Input tensor.
        axis (int, optional): Axis along which the cumulative sum is computed. The
            default (None) is to compute the cumsum over the flattened array.
        dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as `a`,
            unless `a` has an integer dtype with a precision less than that of the
            default platform integer. In that case, the default platform integer
            is used.

    Returns:
        Tensor.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If axis is out of range.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> output = np.cumsum(np.ones((3,3)), axis=0)
        >>> print(output)
        [[1. 1. 1.]
         [2. 2. 2.]
         [3. 3. 3.]]
    """
    _check_input_tensor(a)
    original_dtype = F.dtype(a)
    # If the original tensor is int (or bool) with precision less than int32,
    # report the result as int32, matching numpy's platform-integer promotion.
    if _check_same_type(original_dtype, mstype.bool_) or \
            _check_same_type(original_dtype, mstype.int8) or \
            _check_same_type(original_dtype, mstype.int16):
        original_dtype = mstype.int32
    # NOTE(review): indentation reconstructed — this cast appears unconditional,
    # so the accumulation itself always runs in float32; confirm upstream.
    a = a.astype(mstype.float32)
    if axis is None:
        # Flattened cumsum: ravel and accumulate along the single axis.
        a = a.ravel()
        axis = 0
    _check_axis_in_range(axis, a.ndim)
    # Cast the float32 accumulation back to the requested/original dtype.
    if dtype is not None and not _check_same_type(original_dtype, dtype):
        return _cumsum_default(a, axis).astype(dtype, copy=False)
    return _cumsum_default(a, axis).astype(original_dtype, copy=False)
def _apply_tensor_op(fn, *args, out=None, where=True, dtype=None):
    """
    Applies tensor operations based on fn.

    Args:
        fn: element-wise op applied to `args`.
        *args (Tensor): input tensors; all are validated as tensors.
        out (Tensor or None): when a Tensor together with a Tensor `where`,
            supplies the values kept where the mask is False; also fixes the
            output dtype.
        where (bool or Tensor): mask selecting where the op result is used.
        dtype (:class:`mindspore.dtype`, optional): requested output dtype;
            ignored when `out` is a Tensor.

    Returns:
        Tensor, result of `fn(*args)` merged with `out` per `where`, cast to
        the resolved output dtype.
    """
    _check_input_tensor(*args)
    res = fn(*args)

    # If out is set to a non-default value, the returned tensor has the same
    # dtype as out, which overrides the dtype passed into the keyword argument.
    if isinstance(out, Tensor):
        dtype_out = F.dtype(out)
    elif dtype is not None:
        dtype_out = dtype
    else:
        dtype_out = F.dtype(res)

    if isinstance(out, Tensor) and isinstance(where, Tensor):
        # Masked merge: positions where `where` is True take the op result,
        # everywhere else `out` keeps its original values.
        out = where_(where, res, out)
    elif out is None or where is not None:
        # `out` is not supported for storing results: any non-Tensor `where`
        # (including the default True) just yields the raw op result.
        out = res

    if not _check_same_type(F.dtype(out), dtype_out):
        out = F.cast(out, dtype_out)
    return out