You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

standard_method.py 56 kB

4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794
  1. # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  2. #
  3. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  4. #
  5. # Licensed under the Apache License, Version 2.0 (the "License");
  6. # you may not use this file except in compliance with the License.
  7. # You may obtain a copy of the License at
  8. #
  9. # http://www.apache.org/licenses/LICENSE-2.0
  10. #
  11. # Unless required by applicable law or agreed to in writing, software
  12. # distributed under the License is distributed on an "AS IS" BASIS,
  13. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14. # See the License for the specific language governing permissions and
  15. # limitations under the License.
  16. # ============================================================================
  17. """standard_method"""
  18. from dataclasses import dataclass
  19. from mindspore import Tensor, Parameter
  20. from mindspore import dtype as mstype
  21. from ..._checkparam import Validator as validator
  22. from ...ops import functional as F
  23. from ...ops import operations as P
  24. from ...ops.composite import tail, core, MultitypeFuncGraph, env_get, hyper_add, \
  25. zeros_like, ones_like, repeat_elements
  26. from ...ops.composite.base import _append
  27. from ...ops.composite.multitype_ops import _constexpr_utils as const_utils
  28. from ...ops.composite.multitype_ops import _compile_utils as compile_utils
  29. from ...ops.primitive import constexpr
__all__ = ['MultitypeFuncGraph', 'env_get', 'hyper_add', 'zeros_like', 'ones_like']

# Reusable primitive instances shared by the tensor methods defined below.
shape_ = P.Shape()
dtype_ = P.DType()
abs_ = P.Abs()
ndim_ = P.Rank()
cumsum_ = P.CumSum()
size_op_ = P.Size()
_reduce_sum_default = P.ReduceSum()
_reduce_sum_keepdims = P.ReduceSum(True)
_mean_keepdims = P.ReduceMean(True)
# Bytes per element for each supported dtype; consumed by itemsize_/nbytes_/strides_.
itemsize_map = {mstype.bool_: 1, mstype.int8: 1, mstype.uint8: 1,
                mstype.float16: 2, mstype.int16: 2, mstype.uint16: 2,
                mstype.float32: 4, mstype.int32: 4, mstype.uint32: 4,
                mstype.float64: 8, mstype.int64: 8, mstype.uint64: 8}
# Constant NaN tensor reused where a NaN result must be produced.
nan_tensor = Tensor(float('nan'), dtype=mstype.float32)
  45. def mean(x, axis=(), keep_dims=False):
  46. """
  47. Reduces a dimension of a tensor by averaging all elements in the dimension.
  48. Args:
  49. axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction,
  50. when axis is None or empty tuple, reduce all dimensions. Default: ().
  51. keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
  52. Returns:
  53. Tensor, has the same data type as input tensor.
  54. Supported Platforms:
  55. ``Ascend`` ``GPU`` ``CPU``
  56. Examples:
  57. >>> import numpy as np
  58. >>> from mindspore import Tensor
  59. >>> input_x = Tensor(np.array([1, 2, 3], dtype=np.float32))
  60. >>> output = input_x.mean()
  61. >>> print(output)
  62. 2.0
  63. """
  64. if axis is None:
  65. axis = ()
  66. reduce_mean = P.ReduceMean(keep_dims)
  67. return reduce_mean(x, axis)
  68. def all_(x, axis=(), keep_dims=False):
  69. """
  70. Check all array elements along a given axis evaluate to True.
  71. Args:
  72. x (Tensor): A Tensor to be reduced.
  73. axis (Union[None, int, tuple(int)): Dimensions of reduction.
  74. keep_dims (bool): Whether to keep the reduced dimensions.
  75. Returns:
  76. Tensor, has the same data type as x.
  77. """
  78. if axis is None:
  79. axis = ()
  80. reduce_all = P.ReduceAll(keep_dims)
  81. return reduce_all(x, axis)
  82. def any_(x, axis=(), keep_dims=False):
  83. """
  84. Check any array element along a given axis evaluate to True.
  85. Args:
  86. x (Tensor): A Tensor to be reduced.
  87. axis (Union[None, int, tuple(int)): Dimensions of reduction.
  88. keep_dims (bool): Whether to keep the reduced dimensions.
  89. Returns:
  90. Tensor, has the same data type as x.
  91. """
  92. if axis is None:
  93. axis = ()
  94. reduce_any = P.ReduceAny(keep_dims)
  95. return reduce_any(x, axis)
  96. def size_(x):
  97. """
  98. Return the number of elements in tensor `x`.
  99. Note:
  100. To strictly follow Numpy's behaviour, return 1 for tensor scalar.
  101. Args:
  102. x (Tensor): Input tensor.
  103. Returns:
  104. size(int).
  105. """
  106. if not shape_(x):
  107. return size_op_(x) + 1
  108. return size_op_(x)
  109. def itemsize_(x):
  110. """
  111. Return length of one tensor element in bytes.
  112. Args:
  113. x (Tensor): Input tensor.
  114. Returns:
  115. itemsize(int).
  116. """
  117. return get_itemsize(x.dtype)
  118. def nbytes_(x):
  119. """
  120. Return total number of bytes taken by the tensor.
  121. Args:
  122. x (Tensor): Input tensor.
  123. Returns:
  124. nbytes(int).
  125. """
  126. return itemsize_(x) * F.shape_mul(shape_(x))
  127. def strides_(x):
  128. """
  129. Return the tuple of bytes to step in each dimension when traversing a tensor.
  130. Args:
  131. x (Tensor): Input tensor.
  132. Returns:
  133. strides (tuple[int]).
  134. """
  135. strides = ()
  136. ndim = P.Rank()(x)
  137. tensor_shape = shape_(x)
  138. for i in F.make_range(0, ndim):
  139. stride = itemsize_(x)
  140. for j in F.make_range(i + 1, ndim):
  141. stride *= tensor_shape[j]
  142. strides += (stride,)
  143. return strides
  144. def astype(x, dtype, copy=True): # pylint: disable=redefined-outer-name
  145. """
  146. Return a copy of the tensor, casted to a specified type.
  147. Args:
  148. dtype (Union[:class:`mindspore.dtype`, str]): Designated tensor dtype, can be in format
  149. of :class:`mindspore.dtype.float32` or `float32`.
  150. Default: :class:`mindspore.dtype.float32`.
  151. copy (bool, optional): By default, astype always returns a newly allocated
  152. tensor. If this is set to false, the input tensor is returned instead
  153. of a copy if possible. Default: True.
  154. Returns:
  155. Tensor, with the designated dtype.
  156. Raises:
  157. TypeError: If `dtype` has types not specified above, or values cannot be understood.
  158. Supported Platforms:
  159. ``Ascend`` ``GPU`` ``CPU``
  160. Examples:
  161. >>> import numpy as np
  162. >>> from mindspore import Tensor
  163. >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
  164. >>> x = x.astype("int32")
  165. >>> print(x.dtype)
  166. Int32
  167. """
  168. dtype = check_astype_dtype_const(dtype)
  169. if not copy and dtype == x.dtype:
  170. return x
  171. return F.cast(x, dtype)
  172. def transpose(x, *axis):
  173. r"""
  174. Return a view of the tensor with axes transposed.
  175. For a 1-D tensor this has no effect, as a transposed vector is simply the
  176. same vector. For a 2-D tensor, this is a standard matrix transpose. For a
  177. n-D tensor, if axes are given, their order indicates how the axes are permuted.
  178. If axes are not provided and tensor.shape = (i[0], i[1],...i[n-2], i[n-1]),
  179. then tensor.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0]).
  180. Args:
  181. axes(Union[None, tuple(int), list(int), int], optional): If axes is None or
  182. blank, tensor.transpose() will reverse the order of the axes. If axes is tuple(int)
  183. or list(int), tensor.transpose() will transpose the tensor to the new axes order.
  184. If axes is int, this form is simply intended as a convenience alternative to the
  185. tuple/list form.
  186. Returns:
  187. Tensor, has the same dimension as input tensor, with axes suitably permuted.
  188. Raises:
  189. TypeError: If input arguments have types not specified above.
  190. ValueError: If the number of `axes` is not euqal to a.ndim.
  191. Supported Platforms:
  192. ``Ascend`` ``GPU`` ``CPU``
  193. Examples:
  194. >>> import numpy as np
  195. >>> from mindspore import Tensor
  196. >>> x = Tensor(np.ones((1,2,3), dtype=np.float32))
  197. >>> x = x.transpose()
  198. >>> print(x.shape)
  199. (3, 2, 1)
  200. """
  201. ndim = F.rank(x)
  202. perm = check_transpose_axis_const(axis, ndim)
  203. return F.transpose(x, perm)
  204. # `tensor.T` is used as a property in graph mode
  205. T_ = transpose
  206. def reshape(x, *shape):
  207. """
  208. Give a new shape to a tensor without changing its data.
  209. Args:
  210. shape(Union[int, tuple(int), list(int)]): The new shape should be compatible
  211. with the original shape. If an integer, then the result will be a 1-D
  212. array of that length. One shape dimension can be -1. In this case, the
  213. value is inferred from the length of the array and remaining dimensions.
  214. Returns:
  215. Tensor, with new specified shape.
  216. Raises:
  217. TypeError: If new_shape is not integer, list or tuple, or `x` is not tensor.
  218. ValueError: If new_shape is not compatible with the original shape.
  219. Supported Platforms:
  220. ``Ascend`` ``GPU`` ``CPU``
  221. Examples:
  222. >>> from mindspore import Tensor
  223. >>> from mindspore import dtype as mstype
  224. >>> x = Tensor([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=mstype.float32)
  225. >>> output = x.reshape((3, 2))
  226. >>> print(output)
  227. [[-0.1 0.3]
  228. [ 3.6 0.4]
  229. [ 0.5 -3.2]]
  230. """
  231. new_shape = check_reshape_shp_const(shape)
  232. return F.reshape(x, new_shape)
  233. def ravel(x):
  234. """
  235. Return a contiguous flattened tensor.
  236. Returns:
  237. Tensor, a 1-D tensor, containing the same elements of the input.
  238. Supported Platforms:
  239. ``Ascend`` ``GPU`` ``CPU``
  240. Examples:
  241. >>> import numpy as np
  242. >>> from mindspore import Tensor
  243. >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
  244. >>> output = x.ravel()
  245. >>> print(output.shape)
  246. (24,)
  247. """
  248. return reshape(x, (-1,))
  249. def flatten(x, order='C'):
  250. r"""
  251. Return a copy of the tensor collapsed into one dimension.
  252. Args:
  253. order (str, optional): Can choose between 'C' and 'F'. 'C' means to
  254. flatten in row-major (C-style) order. 'F' means to flatten in column-major
  255. (Fortran-style) order. Only 'C' and 'F' are supported. Default: 'C'.
  256. Returns:
  257. Tensor, has the same data type as input.
  258. Supported Platforms:
  259. ``Ascend`` ``GPU`` ``CPU``
  260. Raises:
  261. TypeError: If `order` is not string type.
  262. ValueError: If `order` is string type, but not 'C' or 'F'.
  263. Examples:
  264. >>> import numpy as np
  265. >>> from mindspore import Tensor
  266. >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
  267. >>> output = x.flatten()
  268. >>> print(output.shape)
  269. (24,)
  270. """
  271. order = check_flatten_order_const(order)
  272. if order == 'C':
  273. return F.reshape(x, (-1,))
  274. perm = F.make_range(0, F.rank(x))
  275. new_order = F.tuple_reversed(perm)
  276. return F.reshape(F.transpose(x, new_order), (-1,))
def swapaxes(x, axis1, axis2):
    """
    Interchange two axes of a tensor.

    Args:
        axis1 (int): First axis.
        axis2 (int): Second axis.

    Returns:
        Transposed tensor, has the same data type as the input.

    Raises:
        TypeError: If `axis1` or `axis2` is not integer.
        ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> x = Tensor(np.ones((2,3,4), dtype=np.float32))
        >>> output = x.swapaxes(0, 2)
        >>> print(output.shape)
        (4,3,2)
    """
    # Normalize possibly-negative axes into [0, ndim).
    axis1, axis2 = check_swapaxes_axis_const((axis1, axis2), x.ndim)
    if axis1 == axis2:
        return x
    # Order the pair so the slice arithmetic below can assume axis1 < axis2.
    if axis1 > axis2:
        axis1, axis2 = axis2, axis1
    perm = F.make_range(0, x.ndim)
    new_perm = None
    # Build the permutation by splicing tuple slices: everything before axis1,
    # then axis2, the span strictly between them, then axis1, then (if any)
    # the tail after axis2.
    if axis2 + 1 < x.ndim:
        new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
            perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1] + perm[axis2 + 1:]
    else:
        # axis2 is the last axis, so there is no tail slice to append.
        new_perm = perm[0:axis1] + perm[axis2:axis2 + 1] + \
            perm[axis1 + 1:axis2] + perm[axis1:axis1 + 1]
    return F.transpose(x, new_perm)
  312. def squeeze(x, axis=None):
  313. """
  314. Remove single-dimensional entries from the shape of a tensor.
  315. Args:
  316. axis (Union[None, int, list(int), tuple(int)], optional): Default is None.
  317. Returns:
  318. Tensor, with all or a subset of the dimensions of length 1 removed.
  319. Raises:
  320. TypeError: If input arguments have types not specified above.
  321. ValueError: If specified axis has shape entry :math:`> 1`.
  322. Supported Platforms:
  323. ``Ascend`` ``GPU`` ``CPU``
  324. Examples:
  325. >>> import numpy as np
  326. >>> from mindspore import Tensor
  327. >>> x = Tensor(np.ones((1,2,2,1), dtype=np.float32))
  328. >>> x = x.squeeze()
  329. >>> print(x.shape)
  330. (2, 2)
  331. """
  332. shape = F.shape(x)
  333. if axis is None:
  334. return F.squeeze(x)
  335. # yield squeezed shape based on the axes
  336. new_shape = prepare_shape_for_squeeze_const(shape, axis)
  337. return F.reshape(x, new_shape)
  338. def argmax(x, axis=None):
  339. """
  340. Returns the indices of the maximum values along an axis.
  341. Args:
  342. axis (int, optional): By default, the index is into
  343. the flattened array, otherwise along the specified axis.
  344. Returns:
  345. Tensor, array of indices into the array. It has the same
  346. shape as a.shape with the dimension along axis removed.
  347. Raises:
  348. ValueError: if axis is out of range.
  349. Supported Platforms:
  350. ``Ascend`` ``GPU`` ``CPU``
  351. Examples:
  352. >>> import numpy as np
  353. >>> from mindspore import Tensor
  354. >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
  355. >>> print(a.argmax())
  356. 5
  357. """
  358. # P.Argmax only supports float
  359. x = x.astype(mstype.float32)
  360. if axis is None:
  361. x = ravel(x)
  362. axis = 0
  363. else:
  364. axis = check_axis_in_range_const(axis, F.rank(x))
  365. return P.Argmax(axis)(x)
  366. def argmin(x, axis=None):
  367. """
  368. Returns the indices of the minimum values along an axis.
  369. Args:
  370. a (Union[int, float, bool, list, tuple, Tensor]): Input array.
  371. axis (int, optional): By default, the index is into
  372. the flattened array, otherwise along the specified axis.
  373. Returns:
  374. Tensor, array of indices into the array. It has the same
  375. shape as a.shape with the dimension along axis removed.
  376. Raises:
  377. ValueError: if axis is out of range.
  378. Supported Platforms:
  379. ``Ascend`` ``GPU`` ``CPU``
  380. Examples:
  381. >>> import numpy as np
  382. >>> from mindspore import Tensor
  383. >>> a = Tensor(np.arange(10, 16).reshape(2, 3).astype("float32"))
  384. >>> print(a.argmin())
  385. 0
  386. """
  387. # P.Argmax only supports float
  388. x = x.astype(mstype.float32)
  389. if axis is None:
  390. x = ravel(x)
  391. axis = 0
  392. else:
  393. axis = check_axis_in_range_const(axis, F.rank(x))
  394. # P.Argmin is currently not supported
  395. return P.Argmax(axis)(F.neg_tensor(x))
  396. def cumsum(x, axis=None, dtype=None):
  397. """
  398. Returns the cumulative sum of the elements along a given axis.
  399. Note:
  400. If ``x.dtype`` is :class:`int8`, :class:`int16` or :class:`bool`, the result
  401. `dtype` will be elevated to :class:`int32`, :class:`int64` is not supported.
  402. Args:
  403. x (Tensor): Input tensor.
  404. axis (int, optional): Axis along which the cumulative sum is computed. The
  405. default (None) is to compute the cumsum over the flattened array.
  406. dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as original,
  407. tensor, unless it has an integer dtype with a precision less than :class:`float32`.
  408. In that case, :class:`float32` is used.
  409. Returns:
  410. Tensor.
  411. Supported Platforms:
  412. ``Ascend`` ``GPU`` ``CPU``
  413. Examples:
  414. >>> import numpy as np
  415. >>> from mindspore import Tensor
  416. >>> a = Tensor(np.ones((3,3)).astype("float32"))
  417. >>> output = a.cumsum(axis=0)
  418. >>> print(output)
  419. [[1. 1. 1.]
  420. [2. 2. 2.]
  421. [3. 3. 3.]]
  422. """
  423. original_dtype = x.dtype
  424. # If original tensor is int, and has precision less then int32, convert
  425. # to int32
  426. if x.dtype in (mstype.bool_, mstype.int8, mstype.int16, mstype.uint8, mstype.int16):
  427. x = x.astype(mstype.int32)
  428. if axis is None:
  429. x = x.ravel()
  430. axis = 0
  431. check_axis_in_range_const(axis, x.ndim)
  432. if dtype is not None and original_dtype != dtype:
  433. return cumsum_(x, axis).astype(dtype, copy=False)
  434. return cumsum_(x, axis)
  435. def copy(x):
  436. """
  437. Returns a copy of the tensor.
  438. Note:
  439. The current implementation does not support `order` argument.
  440. Args:
  441. x (Tensor): Input tensor.
  442. Returns:
  443. Copied tensor.
  444. Supported Platforms:
  445. ``Ascend`` ``GPU`` ``CPU``
  446. Examples:
  447. >>> import numpy as np
  448. >>> from mindspore import Tensor
  449. >>> a = Tensor(np.ones((3,3)).astype("float32"))
  450. >>> output = a.copy()
  451. >>> print(output)
  452. [[1. 1. 1.]
  453. [1. 1. 1.]
  454. [1. 1. 1.]]
  455. """
  456. if x.size == 0:
  457. return x
  458. origin_dtype = x.dtype
  459. if origin_dtype == mstype.bool_:
  460. return F.logical_not(F.logical_not(x))
  461. if origin_dtype != mstype.float64:
  462. x = x.astype(mstype.float32)
  463. x = x / 1.0
  464. x = x.astype(origin_dtype)
  465. return x
  466. def max(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disable=redefined-builtin
  467. """
  468. Returns the maximum of a tensor or maximum along an axis.
  469. Args:
  470. x (Tensor): Input Tensor.
  471. axis (None or int or tuple of ints, optional): defaults to None. Axis or
  472. axes along which to operate. By default, flattened input is used. If
  473. this is a tuple of ints, the maximum is selected over multiple axes,
  474. instead of a single axis or all the axes as before.
  475. keepdims (boolean, optional): defaults to False.
  476. If this is set to True, the axes which are reduced are left in the
  477. result as dimensions with size one. With this option, the result will
  478. broadcast correctly against the input array.
  479. initial (scalar, optional):
  480. The minimum value of an output element. Must be present to allow
  481. computation on empty slice.
  482. where (boolean Tensor, optional): defaults to True.
  483. A boolean array which is broadcasted to match the dimensions of array,
  484. and selects elements to include in the reduction. If non-default value
  485. is passed, initial must also be provided.
  486. Returns:
  487. Tensor or scalar, maximum of input tensor. If `axis` is None, the result is a scalar
  488. value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.
  489. Raises:
  490. TypeError: if the input is not a tensor.
  491. Supported Platforms:
  492. ``Ascend`` ``GPU`` ``CPU``
  493. Examples:
  494. >>> import numpy as np
  495. >>> from mindspore import Tensor
  496. >>> import mindspore.numpy as np
  497. >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
  498. >>> output = a.max()
  499. >>> print(output)
  500. 3.0
  501. """
  502. return compile_utils.reduce_(x, P.ReduceMax(keepdims), cmp_fn=F.maximum,
  503. axis=axis, keepdims=keepdims, initial=initial, where=where)
  504. def min(x, axis=None, keepdims=False, initial=None, where=True): # pylint: disable=redefined-builtin
  505. """
  506. Returns the minimum of a tensor or minimum along an axis.
  507. Args:
  508. a (Tensor): Input data.
  509. axis (None or int or tuple of ints, optional): defaults to None. Axis or
  510. axes along which to operate. By default, flattened input is used. If
  511. this is a tuple of ints, the minimum is selected over multiple axes,
  512. instead of a single axis or all the axes as before.
  513. keepdims (boolean, optional): defaults to False.
  514. If this is set to True, the axes which are reduced are left in the
  515. result as dimensions with size one. With this option, the result will
  516. broadcast correctly against the input array.
  517. initial (scalar, optional):
  518. The maximum value of an output element. Must be present to allow
  519. computation on empty slice.
  520. where (boolean Tensor, optional): defaults to True.
  521. A boolean array which is broadcasted to match the dimensions of array,
  522. and selects elements to include in the reduction. If non-default value
  523. is passed, initial must also be provided.
  524. Returns:
  525. Tensor or scalar, minimum of `a`. If axis is None, the result is a scalar
  526. value. If `axis` is given, the result is an array of dimension ``a.ndim - 1``.
  527. Raises:
  528. TypeError: if the input is not a tensor.
  529. Supported Platforms:
  530. ``Ascend`` ``GPU`` ``CPU``
  531. Examples:
  532. >>> import numpy as np
  533. >>> from mindspore import Tensor
  534. >>> import mindspore.numpy as np
  535. >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
  536. >>> output = a.min()
  537. >>> print(output)
  538. 0.0
  539. """
  540. return compile_utils.reduce_(x, P.ReduceMin(keepdims), cmp_fn=F.minimum,
  541. axis=axis, keepdims=keepdims, initial=initial, where=where)
  542. def resize(x, *new_shape):
  543. """
  544. Changes shape and size of array in-place.
  545. Note:
  546. Instead of changing the size of the input array and returns nothing as in numpy,
  547. this method returns a new Tensor with the input size.
  548. Numpy argument `refcheck` is not supported.
  549. Args:
  550. new_shape (Union[ints, tuple of ints]): Shape of resized array.
  551. Returns:
  552. Tensor.
  553. Supported Platforms:
  554. ``Ascend`` ``GPU`` ``CPU``
  555. Examples:
  556. >>> from mindspore import numpy as np
  557. >>> x = np.array([[0, 1], [2, 3]])
  558. >>> x = x.resize(2, 3)
  559. >>> print(x)
  560. [[0 1 2]
  561. [3 0 0]]
  562. """
  563. if not new_shape:
  564. return x
  565. if len(new_shape) == 1:
  566. if isinstance(new_shape[0], tuple):
  567. new_shape = new_shape[0]
  568. flattened = x.ravel()
  569. cur_size = F.shape_mul(x.shape)
  570. new_size = F.shape_mul(new_shape)
  571. diff_size = new_size - cur_size
  572. if diff_size > 0:
  573. pad_val = F.fill(x.dtype, (diff_size,), 0)
  574. res = P.Concat()((flattened, pad_val))
  575. else:
  576. res = flattened[:new_size]
  577. return res.reshape(new_shape)
def diagonal(x, offset=0, axis1=0, axis2=1):
    """
    Return specified diagonals.

    Args:
        offset (int, optional): Offset of the diagonal from the main diagonal.
            Can be positive or negative. Defaults to main diagonal.
        axis1 (int, optional): Axis to be used as the first axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            first axis (0).
        axis2 (int, optional): Axis to be used as the second axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            second axis.

    Returns:
        Tensor, if `a` is 2-D, then `a` 1-D array containing the diagonal.

    Raises:
        ValueError: if the input tensor has less than two dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(4).reshape(2,2)
        >>> print(a)
        [[0 1]
        [2 3]]
        >>> output = a.diagonal()
        >>> print(output)
        [0 3]
    """
    ndim = x.ndim
    if ndim < 2:
        const_utils.raise_value_error('diagonal requires an array of at least two dimensions')
    dtype = x.dtype
    axes = check_axis_valid((axis1, axis2), ndim)
    # Move the two diagonal axes to the end, keeping the others in order.
    perm = ()
    for i in range(ndim):
        if i not in axes:
            perm += (i,)
    perm += axes
    x = x.transpose(perm)
    shape = x.shape
    n, m = shape[-2:]
    # Build an (n, m) identity-like mask selecting the requested diagonal.
    e = F.eye(n, m, dtype)
    if offset >= m or offset <= -n:
        # Diagonal is entirely outside the matrix: all-zero mask.
        e = F.fill(dtype, (n, m), 0)
    elif offset != 0:
        # Shift the eye mask by `offset` columns (positive) or rows (negative),
        # padding the vacated side with zeros.
        e = e.astype(mstype.float32)
        if offset > 0:
            e_left = F.fill(dtype, (n, offset), 0)
            e_right = e[..., 0:m-offset:1]
            e = P.Concat(1)((e_left, e_right)).astype(dtype)
        elif offset < 0:
            e_upper = F.fill(dtype, (-offset, m), 0)
            e_lower = e[0:n+offset:1, ...]
            e = P.Concat(0)((e_upper, e_lower)).astype(dtype)
    e = P.BroadcastTo(shape)(e)
    # Mask then sum over the last axis: each row contributes its diagonal element.
    prod = F.tensor_mul(x, e)
    res = F.reduce_sum(prod.astype(mstype.float32), -1)
    # Slice away the rows that do not intersect the diagonal.
    begin = ()
    for i in range(ndim-2):
        begin += (0,)
    last_dim_begin = max_(0, -offset)
    begin += (last_dim_begin,)
    size = res.shape[:-1]
    last_dim_end = min_(
        shape[-2], max_(0, shape[-1] - offset)) - last_dim_begin
    if last_dim_end <= 0:
        # Empty diagonal, e.g. offset beyond the matrix extent.
        return empty_compile(dtype, (0,))
    size += (last_dim_end,)
    res = F.tensor_slice(res, begin, size)
    return res.astype(dtype)
def trace(x, offset=0, axis1=0, axis2=1, dtype=None):
    """
    Returns the sum along diagonals of the array.

    Args:
        offset (int, optional): Offset of the diagonal from the main diagonal.
            Can be positive or negative. Defaults to main diagonal.
        axis1 (int, optional): Axis to be used as the first axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            first axis (0).
        axis2 (int, optional): Axis to be used as the second axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            second axis.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, sum_along_diagonals.

    Raises:
        ValueError: if the input tensor has less than two dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.eye(3)
        >>> print(x.trace())
        3.0
    """
    # Extract the requested diagonal, then sum it along its last axis.
    d = x.diagonal(offset, axis1=axis1, axis2=axis2)
    shape = d.shape
    if dtype is None:
        dtype = d.dtype
    # An empty diagonal (offset entirely outside the matrix) sums to zeros.
    if shape[-1] == 0:
        return F.fill(dtype, shape[:-1], 0)
    # The reduction is computed in float32, then cast back to `dtype`.
    # NOTE(review): assumes float32 accumulation is acceptable for integer
    # dtypes -- confirm precision for large int inputs.
    res = F.reduce_sum(d.astype(mstype.float32), -1)
    return res.astype(dtype)
  682. def take(x, indices, axis=None, mode='clip'):
  683. """
  684. Takes elements from an array along an axis.
  685. Args:
  686. a (Tensor): Source array with shape `(Ni…, M, Nk…)`.
  687. indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
  688. axis (int, optional): The axis over which to select values. By default,
  689. the flattened input array is used.
  690. mode (‘raise’, ‘wrap’, ‘clip’, optional):
  691. - edge: Pads with the edge values of `arr`.
  692. - raise: Raises an error;
  693. - wrap: Wraps around;
  694. - clip: Clips to the range. `clip` mode means that all indices that are
  695. too large are replaced by the index that addresses the last element
  696. along that axis. Note that this disables indexing with negative numbers.
  697. Returns:
  698. Tensor, the indexed result.
  699. Raises:
  700. ValueError: if axis is out of range.
  701. TypeError: if the input is not a Tensor.
  702. Supported Platforms:
  703. ``Ascend`` ``GPU`` ``CPU``
  704. Examples:
  705. >>> import mindspore.numpy as np
  706. >>> a = np.array([4, 3, 5, 7, 6, 8])
  707. >>> indices = np.array([0, 1, 4])
  708. >>> output = a.take(indices)
  709. >>> print(output)
  710. [4 3 6]
  711. """
  712. if mode not in ('raise', 'wrap', 'clip'):
  713. const_utils.raise_value_error('raise should be one of "raise", "wrap", or "clip"')
  714. if axis is None:
  715. a = x.ravel()
  716. axis = 0
  717. else:
  718. a = x
  719. ndim = a.ndim
  720. axis = check_axis_in_range_const(axis, ndim)
  721. shape_a = a.shape
  722. shape_indices = indices.shape
  723. size_indices = indices.size
  724. indices = compile_utils.check_indices(shape_a[axis], indices, mode)
  725. # reshapes indices to shape (Ni..., Nj..., Nk)
  726. shape_ni = tuple_slice(shape_a, None, axis)
  727. shape_nk = tuple_slice(shape_a, axis + 1, None)
  728. shape_out = shape_ni + shape_indices + shape_nk
  729. shape_indices = expanded_shape(ndim, size_indices, axis)
  730. indices = indices.reshape(shape_indices)
  731. shape_indices = shape_ni + (indices.size,) + shape_nk
  732. indices = P.BroadcastTo(shape_indices)(indices)
  733. res = F.gather_d(a, axis, indices)
  734. return res.reshape(shape_out)
def choose(x, choices, mode='clip'):
    """
    Construct an array from an index array and a list of arrays to choose from.

    Args:
        choices (sequence of arrays): Choice arrays. `a` and all of the `choices` must
            be broadcastable to the same shape. If `choices` is itself an array, then
            its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
            is taken as defining the "sequence".
        mode ('raise', 'wrap', 'clip', optional): Specifies how indices outside
            ``[0, n-1]`` will be treated:
            'raise' - raise an error (default);
            'wrap' - wrap around;
            'clip' - clip to the range. 'clip' mode means that all indices that are
            too large are replaced by the index that addresses the last element
            along that axis. Note that this disables indexing with negative numbers.

    Returns:
        Tensor, the merged result.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        ValueError: if ``len(condlist) != len(choicelist)``.

    Examples:
        >>> import mindspore.numpy as np
        >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]]
        >>> x = np.array([2, 3, 1, 0])
        >>> print(x.choose(choices))
        [20 31 12 3]
    """
    if check_is_tensor(F.typeof(choices)):
        # `choices` is one tensor: dim 0 indexes the candidate arrays;
        # broadcast the remaining dims against `x`.
        shape_choice = infer_out_shape(x.shape, choices.shape[1:])
        choices = P.BroadcastTo((choices.shape[0],) + shape_choice)(choices)
    else:
        # broadcasts choices to the same shape if choices is a sequence
        choicelist = []
        shapes = ()
        for choice in choices:
            if not check_is_tensor(F.typeof(choice)):
                choice = const_utils.make_tensor(choice)
            shapes += (choice.shape,)
            choicelist.append(choice)
        shape_choice = infer_out_shape(x.shape, *shapes)
        tmp = []
        for choice in choicelist:
            tmp.append(P.BroadcastTo(shape_choice)(choice))
        choices = F.stack(tmp)
    if x.ndim == 0 or choices.ndim == 0:
        const_utils.raise_value_error('input cannot be scalars')
    a = P.BroadcastTo(shape_choice)(x)
    dtype = choices.dtype
    # adjusts dtype for F.tensor_mul and F.gather_nd
    a = a.astype(mstype.int32)
    choices = choices.astype(mstype.int32)
    a = compile_utils.check_indices(choices.shape[0], a, mode, allow_negative_index=False)
    # Build a full coordinate grid over `a`, then prepend the (validated)
    # choice index so gather_nd selects one element per output position.
    grids = []
    ndim = len(a.shape)
    for i in range(ndim):
        dim_grid = const_utils.make_tensor(F.make_range(a.shape[i]), mstype.int32)
        dim_shape = expanded_shape(ndim, a.shape[i], i)
        dim_grid = P.BroadcastTo(a.shape)(dim_grid.reshape(dim_shape))
        grids.append(dim_grid)
    grid = P.Stack(-1)(grids)
    indices = P.Concat(-1)((a.reshape(a.shape + (1,)), grid))
    # Cast back to the original choices dtype (gather ran in int32).
    return F.gather_nd(choices, indices).astype(dtype)
def searchsorted(x, v, side='left', sorter=None):
    """
    Finds indices where elements should be inserted to maintain order.

    Args:
        v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`.
        side ('left', 'right', optional): If 'left', the index of the first suitable
            location found is given. If 'right', return the last such index. If there is
            no suitable index, return either 0 or N (where N is the length of `a`).
        sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of
            integer indices that sort array `a` into ascending order. They are typically
            the result of argsort.

    Returns:
        Tensor, array of insertion points with the same shape as `v`.

    Raises:
        ValueError: if argument for `side` or `sorter` is invalid.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore import numpy as np
        >>> x = np.array([1,2,3,4,5])
        >>> print(x.searchsorted(3))
        2
    """
    if side not in ('left', 'right'):
        const_utils.raise_value_error('invalid value for keyword "side"')
    a = x.astype(mstype.float32)
    if not check_is_tensor(F.typeof(v)):
        v = const_utils.make_tensor(v)
    shape = v.shape
    if sorter is not None:
        # NOTE(review): `sorter.ndim`/`sorter.size` are read before
        # const_utils.make_tensor(sorter) converts it -- this assumes the
        # argument already exposes tensor-like attributes; confirm behavior
        # for plain list/tuple sorters.
        if sorter.ndim != 1 or sorter.size != a.size:
            const_utils.raise_value_error('sorter must be 1-D array with the same size as `a`')
        sorter = const_utils.make_tensor(sorter)
        sorter = sorter.reshape(sorter.shape + (1,))
        # Reorder `a` into ascending order as described by `sorter`.
        a = F.gather_nd(a, sorter)
    # 'left' uses <= so the first equal element is chosen; 'right' uses <.
    less_op = F.tensor_le if side == 'left' else F.tensor_lt
    i = F.fill(mstype.int32, shape, 0)
    j = F.fill(mstype.int32, shape, a.size)
    # Vectorized binary search: log2(size)+1 halving steps are enough.
    sort_range = F.make_range(get_log2_size(F.shape_mul(shape) + 1))
    for _ in sort_range:
        # (i - (-j)) // 2 is the midpoint (i + j) // 2.
        mid = (i - F.neg_tensor(j))//2
        mask = less_op(v, F.gather_nd(a, mid.reshape(mid.shape + (1,))))
        i = F.select(mask, i, mid)
        j = F.select(mask, mid, j)
    return j
def fill(x, value):
    """
    Fills the array with a scalar value.

    Note:
        Unlike Numpy, tensor.fill() will always returns a new tensor, instead of
        filling the original tensor.

    Args:
        value (Union[None, int, float, bool]): All elements of a will be assigned this value.

    Returns:
        Tensor, with the original dtype and shape as input tensor.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `shape` has entries < 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindspore import Tensor
        >>> a = Tensor(np.arange(4).reshape((2,2)).astype('float32'))
        >>> print(a.fill(1.0))
        [[1. 1.]
        [1. 1.]]
    """
    if value is None:
        # `None` means fill with NaN, which only exists for float dtypes.
        if x.dtype not in (mstype.float16, mstype.float32, mstype.float64):
            const_utils.raise_type_error("If None is used as value, the original Tensor's dtype must be float.")
        value = nan_tensor
        return F.tile(value, x.shape).astype(x.dtype)
    if not isinstance(value, (int, float, bool)):
        const_utils.raise_type_error("input value must be a scalar.")
    return F.fill(x.dtype, x.shape, value)
  874. def ptp(x, axis=None, keepdims=False):
  875. """
  876. The name of the function comes from the acronym for ‘peak to peak’.
  877. Note:
  878. Numpy arguments `dtype` and `out` are not supported.
  879. Args:
  880. x (Tensor): Input tensor.
  881. axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
  882. The default is to compute the variance of the flattened array. Default: None.
  883. keepdims (bool): Default is False.
  884. Returns:
  885. Tensor.
  886. Raises:
  887. TypeError: if the input is not a tensor.
  888. Supported Platforms:
  889. ``Ascend`` ``GPU`` ``CPU``
  890. Examples:
  891. >>> from mindspore import Tensor
  892. >>> x = Tensor([[4.0, 9.0, 2.0, 10.0], [6.0, 9.0, 7.0, 12.0]]).astype("float32")
  893. >>> print(x.ptp(axis=1))
  894. [8. 6.]
  895. >>> print(x.ptp(axis=0))
  896. [2. 0. 5. 2.]
  897. """
  898. if not isinstance(keepdims, bool):
  899. const_utils.raise_type_error('keepdims should be boolean')
  900. if axis is None:
  901. axis = ()
  902. else:
  903. check_axis_type(axis, True, True, False)
  904. axis = check_axis_valid(axis, x.ndim)
  905. return x.max(axis, keepdims) - x.min(axis, keepdims)
  906. def clip(x, xmin, xmax, dtype=None):
  907. """
  908. Clips (limits) the values in an array.
  909. Given an interval, values outside the interval are clipped to the interval edges.
  910. For example, if an interval of :math:`[0, 1]` is specified, values smaller than 0 become 0,
  911. and values larger than 1 become 1.
  912. Note:
  913. Currently, clip with `nan` is not supported.
  914. Args:
  915. x (Tensor): Tensor containing elements to clip.
  916. xmin (Tensor, scalar, None): Minimum value. If None, clipping is not performed
  917. on lower interval edge. Not more than one of `xmin` and `xmax` may be None.
  918. xmax (Tensor, scalar, None): Maximum value. If None, clipping is not performed
  919. on upper interval edge. Not more than one of `xmin` and `xmax` may be None.
  920. If `xmin` or `xmax` are tensors, then the three tensors will be broadcasted
  921. to match their shapes.
  922. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  923. output Tensor.
  924. Returns:
  925. Tensor, a tensor with the elements of `x`, but where values
  926. < `xmin` are replaced with `xmin`, and those > `xmax` with `xmax`.
  927. Supported Platforms:
  928. ``Ascend`` ``GPU`` ``CPU``
  929. Examples:
  930. >>> from mindspore import Tensor
  931. >>> x = Tensor([1, 2, 3, -4, 0, 3, 2, 0]).astype("float32")
  932. >>> output = x.clip(0, 2)
  933. >>> print(output)
  934. [1 2 2 0 0 2 2 0]
  935. """
  936. if xmin is None and xmax is None:
  937. const_utils.raise_value_error("One of max or min must be given.")
  938. is_scalar = False
  939. if xmin is not None:
  940. xmin = const_utils.make_tensor(xmin).astype(x.dtype)
  941. if x.ndim == 0 and xmin.ndim == 0:
  942. x = F.maximum(x.reshape((1,)), xmin).squeeze()
  943. else:
  944. x = F.maximum(x, xmin)
  945. if xmax is not None:
  946. xmax = const_utils.make_tensor(xmax).astype(x.dtype)
  947. if x.ndim == 0 and xmax.ndim == 0:
  948. x = F.minimum(x.reshape((1,)), xmax).squeeze()
  949. else:
  950. x = F.minimum(x, xmax)
  951. if is_scalar:
  952. return x.squeeze()
  953. if dtype is not None and dtype != x.dtype:
  954. return x.astype(dtype)
  955. return x
def var(x, axis=None, ddof=0, keepdims=False):
    """
    Compute the variance along the specified axis.

    The variance is the average of the squared deviations from the mean, i.e.,
    :math:`var = mean(abs(x - x.mean())**2)`.

    Return the variance, which is computed for the flattened array by default,
    otherwise over the specified axis.

    Note:
        Numpy arguments `dtype`, `out` and `where` are not supported.

    Args:
        x (Tensor): A Tensor to be calculated.
        axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
            The default is to compute the variance of the flattened array. Default: `None`.
        ddof (int): Means Delta Degrees of Freedom. Default: 0.
            The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
        keepdims (bool): Default: `False`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Returns:
        Variance tensor.

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.array([1., 2., 3., 4.])
        >>> print(input_x.var())
        1.25
    """
    if 0 in x.shape:
        # Variance of an empty tensor is NaN.
        return nan_tensor.astype(x.dtype)
    if not isinstance(ddof, int) or not isinstance(keepdims, int):
        const_utils.raise_type_error("integer argument expected")
    if axis is None:
        # An empty axis tuple reduces over every dimension.
        axis = ()
    else:
        axis = check_and_canonicalize_axes(axis, x.ndim)
    # sum((x - mean)^2); mean keeps dims so the subtraction broadcasts.
    x_mean = _mean_keepdims(x, axis)
    x_sub = F.tensor_sub(x, x_mean)
    x_pow = F.tensor_pow(x_sub, 2)
    if keepdims:
        x_sum = _reduce_sum_keepdims(x_pow, axis)
    else:
        x_sum = _reduce_sum_default(x_pow, axis)
    if axis == ():
        axis = F.make_range(x.ndim)
    # Divisor is N - ddof, where N is the count of reduced elements.
    nums = 1
    for ax in axis:
        nums *= x.shape[ax]
    return F.tensor_div(x_sum, nums - ddof)
  1003. def std(x, axis=None, ddof=0, keepdims=False):
  1004. """
  1005. Compute the standard deviation along the specified axis.
  1006. The standard deviation is the square root of the average of the squared deviations
  1007. from the mean, i.e., :math:`std = sqrt(mean(abs(x - x.mean())**2))`.
  1008. Return the standard deviation, which is computed for the flattened array by default,
  1009. otherwise over the specified axis.
  1010. Note:
  1011. Numpy arguments `dtype`, `out` and `where` are not supported.
  1012. Args:
  1013. x (Tensor): A Tensor to be calculated.
  1014. axis (Union[None, int, tuple(int)]): Axis or axes along which the standard
  1015. deviation is computed. Default: `None`.
  1016. If `None`, compute the standard deviation of the flattened array.
  1017. ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is :math:`N - ddof`,
  1018. where :math:`N` represents the number of elements. Default: 0.
  1019. keepdims: Default: `False`.
  1020. Returns:
  1021. Standard deviation tensor.
  1022. Supported Platforms:
  1023. ``Ascend`` ``GPU`` ``CPU``
  1024. Examples:
  1025. >>> import mindspore.numpy as np
  1026. >>> input_x = np.array([1., 2., 3., 4.])
  1027. >>> print(input_x.std())
  1028. 1.118034
  1029. """
  1030. x_var = var(x, axis, ddof, keepdims)
  1031. return F.tensor_pow(x_var, 0.5)
  1032. def sum(x, axis=None, dtype=None, keepdims=False, initial=None): # pylint: disable=redefined-builtin
  1033. """
  1034. Return sum of array elements over a given axis.
  1035. Note:
  1036. Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and
  1037. `extobj` are not supported.
  1038. Args:
  1039. x (Union[int, float, bool, list, tuple, Tensor]): Elements to sum.
  1040. axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: None.
  1041. If None, sum all of the elements of the input array.
  1042. If axis is negative it counts from the last to the first axis.
  1043. If axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple
  1044. instead of a single axis or all the axes as before.
  1045. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
  1046. output Tensor.
  1047. keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
  1048. dimensions with size one. With this option, the result will broadcast correctly against the input array.
  1049. If the default value is passed, then keepdims will not be passed through to the sum method of
  1050. sub-classes of ndarray, however any non-default value will be. If the sub-class’ method does not
  1051. implement keepdims any exceptions will be raised.
  1052. initial (scalar): Starting value for the sum.
  1053. Returns:
  1054. Tensor. A tensor with the same shape as input, with the specified axis removed.
  1055. If input tensor is a 0-d array, or if axis is None, a scalar is returned.
  1056. Raises:
  1057. TypeError: If input is not array_like or `axis` is not int or tuple of ints or
  1058. `keepdims` is not integer or `initial` is not scalar.
  1059. ValueError: If any axis is out of range or duplicate axes exist.
  1060. Supported Platforms:
  1061. ``Ascend`` ``GPU`` ``CPU``
  1062. Examples:
  1063. >>> import mindspore.numpy as np
  1064. >>> input_x = np.array([-1, 0, 1]).astype('int32')
  1065. >>> print(input_x.sum())
  1066. 0
  1067. >>> input_x = np.arange(10).reshape(2, 5).astype('float32')
  1068. >>> print(input_x.sum(axis=1))
  1069. [10. 35.]
  1070. """
  1071. input_x = x.astype(mstype.int32) if x.dtype == mstype.bool_ else x
  1072. dtype = input_x.dtype if dtype is None else dtype
  1073. if not isinstance(keepdims, int):
  1074. const_utils.raise_type_error("integer argument expected")
  1075. if initial is not None and not isinstance(initial, (int, float, bool)):
  1076. const_utils.raise_type_error("initial argument should be a scalar.")
  1077. if axis is None:
  1078. axis = ()
  1079. else:
  1080. axis = check_and_canonicalize_axes(axis, x.ndim)
  1081. if not check_type_support(input_x.dtype, 'GPU', (mstype.float64, mstype.float32, mstype.float16)):
  1082. input_x = input_x.astype(mstype.float32)
  1083. if 0 in x.shape:
  1084. x = const_utils.make_tensor([0], x.dtype)
  1085. if keepdims:
  1086. res = _reduce_sum_keepdims(input_x, axis)
  1087. else:
  1088. res = _reduce_sum_default(input_x, axis)
  1089. if initial is not None:
  1090. res += initial
  1091. return res.astype(dtype)
def repeat(x, repeats, axis=None):
    """
    Repeat elements of an array.

    Args:
        x (Tensor): Input tensor.
        repeats (Union[int, tuple, list]): The number of repetitions for each element.
            `repeats` is broadcasted to fit the shape of the given axis.
        axis (int, optional): The axis along which to repeat values. By default,
            use the flattened input tensor, and return a flat output tensor.

    Returns:
        Tensor, has the same shape as input tensor except along the given axis.

    Raises:
        ValueError: if axis is out of range.
        TypeError: if input is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array(3)
        >>> print(x.repeat(4))
        [3 3 3 3]
        >>> x = np.array([[1,2],[3,4]])
        >>> print(x.repeat(2))
        [1 1 2 2 3 3 4 4]
        >>> print(x.repeat(3, axis=1))
        [[1 1 1 2 2 2]
        [3 3 3 4 4 4]]
        >>> print(x.repeat([1,2], axis=0))
        [[1 2]
        [3 4]
        [3 4]]
    """
    # Normalize `repeats` to a sequence of validated ints.
    if not isinstance(repeats, (tuple, list)):
        repeats = (repeats,)
    for element in repeats:
        if not isinstance(element, int):
            const_utils.raise_type_error("Each element should be integer")
    if axis is None:
        # No axis: operate on the flattened tensor.
        x = ravel(x)
        axis = 0
    if not isinstance(axis, int):
        const_utils.raise_type_error('axes should be integers')
    check_axis_in_range_const(axis, x.ndim)
    axis = axis + x.ndim if axis < 0 else axis
    if len(repeats) == 1:
        # A single repeat count applies uniformly to every element.
        repeats = repeats[0]
        if repeats == 0:
            return empty_tensor(x.dtype)
        return repeat_elements(x, repeats, axis)
    # Per-element repeat counts: split along `axis`, repeat each slice
    # individually, and concatenate the non-empty results.
    size = x.shape[axis]
    if len(repeats) != size:
        const_utils.raise_value_error('operands could not be broadcast together')
    subs = P.Split(axis, size)(x)
    repeated_subs = []
    for sub, rep in zip(subs, repeats):
        if rep != 0:
            repeated_subs.append(repeat_elements(sub, rep, axis))
    return P.Concat(axis)(repeated_subs)
def getitem(data, index):
    """Implementation of `getitem`: delegates to `data.__getitem__` (the subscript operator)."""
    return data.__getitem__(index)
def setitem(data, index, value):
    """Implementation of `setitem`: delegates to `data.__setitem__` (subscript assignment)."""
    return data.__setitem__(index, value)
def item(data, *args):
    """Implementation of `item`: extract a scalar element via compile_utils.tensor_item."""
    return compile_utils.tensor_item(data, *args)
def itemset(data, *args):
    """Implementation of `itemset`: set a scalar element via compile_utils.tensor_itemset."""
    return compile_utils.tensor_itemset(data, *args)
def ms_iter(xs):
    """Implementation of `iter`: delegates to the `__ms_iter__` protocol."""
    return xs.__ms_iter__()
def ms_next(it):
    """Implementation of `next`: delegates to the `__ms_next__` protocol."""
    return it.__ms_next__()
def hasnext(it):
    """Implementation of `hasnext`: delegates to the `__ms_hasnext__` protocol."""
    return it.__ms_hasnext__()
def ms_len(data):
    """Implementation of `len`: delegates to `data.__len__()`."""
    return data.__len__()
def floor(x):
    """Implementation of `floor`: delegates to `x.__floor__()`."""
    return x.__floor__()
def trunc(x):
    """Implementation of `trunc`: delegates to `x.__trunc__()`."""
    return x.__trunc__()
def uadd(x):
    """Implementation of `uadd` (unary `+`): delegates to `x.__pos__()`."""
    return x.__pos__()
def usub(x):
    """Implementation of `usub` (unary `-`): delegates to `x.__neg__()`."""
    return x.__neg__()
def scalar_truediv(x, y):
    """Implementation of `scalar_truediv` (`/`): delegates to `x.__truediv__(y)`."""
    return x.__truediv__(y)
def scalar_floordiv(x, y):
    """Implementation of `scalar_floordiv` (`//`): delegates to `x.__floordiv__(y)`."""
    return x.__floordiv__(y)
def bool_(x):
    """Implementation of `bool`: delegates to `x.__bool__()`."""
    return x.__bool__()
def enumerate_(x, start=0):
    """Enumerate list or tuple or tensor, yielding (index, element) pairs starting at `start`."""
    x_type = F.typeof(x)
    ret = ()
    op_name = "enumerate"
    # Both validators raise on bad input; on success they return True.
    if check_is_tuple_or_list_or_tensor(x_type, op_name, "first input") and \
            check_is_const_int(start, op_name, "start"):
        if check_is_tensor(x_type):
            # Tensors are enumerated along their first dimension.
            for i in range(x.shape[0]):
                ret += ((start + i, x[i]),)
        else:
            ret = zip(range(start, start + len(x)), x)
    return ret
def expand_tensor_as(x, y):
    """Expand tensor `x` by broadcasting it to the shape of tensor `y`."""
    broadcast_to = P.BroadcastTo(shape_(y))
    return broadcast_to(x)
def view(x, *shape):
    """Reshape tensor, if shape is -1, reshape tensor into one dimension"""
    # check_view_shape unwraps a single tuple argument and rejects empty shapes.
    shape = check_view_shape(shape)
    return F.reshape(x, shape)
def isinstance_(x, base_type):
    """Determine whether x is an instance of base_type (graph-mode `isinstance`)."""
    x_type = F.typeof(x)
    return check_type_same(x_type, base_type)
def while_cond(x):
    """For while condition, if the condition is a tensor, the loop will not be unrolled"""
    if F.issubclass_(F.typeof(x), F.typeof(mstype.tensor)):
        # Only scalar / single-element tensors may act as a bool condition;
        # check_is_tensor_bool_cond raises otherwise.
        is_cond = check_is_tensor_bool_cond(F.shape(x))
        if is_cond:
            return F.cast(x, mstype.bool_)
    return x
@constexpr
def empty_tensor(dtype):
    """Return an empty (zero-element) Tensor of the given dtype."""
    return Tensor([], dtype)
@constexpr
def check_type_same(x_type, base_type):
    """Check x_type is same as base_type."""
    # Map Python builtin classes to their MindSpore compile-time type classes.
    pytype_to_mstype = {
        bool: mstype.Bool,
        int: mstype.Int,
        float: mstype.Float,
        str: mstype.String,
        list: mstype.List,
        tuple: mstype.Tuple,
        dict: mstype.Dict,
        Tensor: mstype.tensor_type,
        Parameter: mstype.ref_type
    }
    has_int = False
    has_tensor = False

    def to_target_type(origin_type):
        # Recursively translate `base_type` (a type or nested tuple of
        # types) into the corresponding mstype classes, recording whether
        # int or tensor appears (for the bool/int and Parameter/Tensor
        # special cases below).
        try:
            if isinstance(origin_type, type):
                ret_type = pytype_to_mstype[origin_type]
                if ret_type == mstype.Int:
                    nonlocal has_int
                    has_int = True
                if ret_type == mstype.tensor_type:
                    nonlocal has_tensor
                    has_tensor = True
                return (ret_type,)
            if isinstance(origin_type, tuple):
                return tuple(to_target_type(i) for i in origin_type)
            raise TypeError(f"The second arg of 'isinstance' must be a type or a tuple of types, "
                            f"but got a {type(origin_type).__name__}")
        except KeyError:
            raise TypeError(f"The second arg of 'isinstance' should be bool, int, float, str, list, tuple, "
                            f"Tensor, Parameter, or a tuple containing only these types, but got {origin_type}")
    target_type = to_target_type(base_type)
    # bool is treated as a subtype of int, and Parameter (ref) as a tensor.
    if (isinstance(x_type, mstype.Bool) and has_int) or (isinstance(x_type, mstype.ref_type) and has_tensor):
        return True
    return isinstance(x_type, target_type)
@constexpr
def get_itemsize(x_type):
    """get itemsize from tensor's dtype."""
    # itemsize_map is a module-level mapping from dtype to element size.
    return itemsize_map[x_type]
  1272. @constexpr
  1273. def check_is_tensor(x):
  1274. """check whether x is tensor."""
  1275. if isinstance(x, mstype.tensor_type):
  1276. return True
  1277. return False
  1278. @constexpr
  1279. def check_is_tuple_or_list_or_tensor(x, op_name, arg_name):
  1280. """check whether x is list or tuple or tensor."""
  1281. if isinstance(x, (mstype.List, mstype.Tuple, mstype.tensor_type)):
  1282. return True
  1283. raise TypeError(f"For '{op_name}', the '{arg_name}' should be tuple or list or tensor, but got {x}.")
@constexpr
def check_is_const_int(x, op_name, arg_name):
    """check whether x is const int.

    Raises:
        TypeError: if `x` is None (i.e. not a compile-time constant) or not an int.
    """
    # None means the value was not constant-folded at compile time.
    if x is None:
        raise TypeError(f"For '{op_name}', the '{arg_name}' should be a const int number, but got not const.")
    if not isinstance(x, int):
        raise TypeError(f"For '{op_name}', the '{arg_name}' should be a const int number, but got {x}.")
    return True
  1292. @constexpr
  1293. def check_is_tensor_bool_cond(shp):
  1294. """check if tensor is a bool condition"""
  1295. if shp in ((), (1,)):
  1296. return True
  1297. raise ValueError("The truth value of an array with several elements is ambiguous.")
@constexpr
def const_tensor_to_bool(x):
    """convert bool tensor to bool condition

    Raises:
        ValueError: if `x` is None (not a compile-time constant) or has
            more than one element.
    """
    if x is None:
        raise ValueError("Only constant tensor bool can be converted to bool")
    # Evaluate the constant tensor on the host.
    x = x.asnumpy()
    if x.shape == ():
        return bool(x)
    if x.shape == (1,):
        return bool(x[0])
    raise ValueError("The truth value of an array with several elements is ambiguous.")
  1309. @constexpr
  1310. def check_view_shape(x):
  1311. """Check view function input shape"""
  1312. if not x:
  1313. raise ValueError("The shape variable should not be empty")
  1314. if isinstance(x[0], tuple):
  1315. if len(x) != 1:
  1316. raise ValueError(f"Only one tuple is needed, but got {x}")
  1317. x = x[0]
  1318. return x
# convert normal param_check functions to constexpr functions
# (wrapped so validation runs once at graph-compile time on constant args)
check_astype_dtype_const = constexpr(validator.check_astype_dtype)
check_transpose_axis_const = constexpr(validator.check_transpose_axis)
check_reshape_shp_const = constexpr(validator.check_reshape_shp)
check_flatten_order_const = constexpr(validator.check_flatten_order)
check_swapaxes_axis_const = constexpr(validator.check_swapaxes_axis)
prepare_shape_for_squeeze_const = constexpr(validator.prepare_shape_for_squeeze)
check_axis_in_range_const = constexpr(validator.check_axis_in_range)
check_axis_valid = constexpr(validator.check_axis_valid)
max_ = constexpr(validator.max_)
min_ = constexpr(validator.min_)
expanded_shape = constexpr(validator.expanded_shape)
tuple_slice = constexpr(validator.tuple_slice)
infer_out_shape = constexpr(validator.infer_out_shape)
get_log2_size = constexpr(validator.get_log2_size)
check_axis_type = constexpr(validator.check_axis_type)
check_and_canonicalize_axes = constexpr(validator.check_and_canonicalize_axes)
empty_compile = constexpr(validator.empty_compile)
check_type_support = constexpr(validator.check_type_support)
def tensor_bool(x):
    """tensor as condition, if is constant, return immediate bool value"""
    is_cond = check_is_tensor_bool_cond(F.shape(x))
    if is_cond and F.isconstant(x):
        # Constant scalar tensors fold to a Python bool at compile time.
        return const_tensor_to_bool(x)
    return F.cast(x, mstype.bool_)
def and_(x, y):
    """Implementation of `and` (`&`): delegates to `x.__and__(y)`."""
    return x.__and__(y)
def or_(x, y):
    """Implementation of `or` (`|`): delegates to `x.__or__(y)`."""
    return x.__or__(y)
def matmul(x, y):
    """Implementation of `matmul` (`@`): delegates to `x.__matmul__(y)`."""
    return x.__matmul__(y)
def float_bool(x):
    """Implementation of `float_bool`: a float is truthy when nonzero."""
    return x != 0.0
def int_bool(x):
    """Implementation of `int_bool`: an int is truthy when nonzero."""
    return x != 0
  1359. def str_bool(x):
  1360. """Implementation of `str_bool`."""
  1361. if x == "":
  1362. return False
  1363. return True
def list_bool(x):
    """Implementation of `list_bool`: a list is truthy when non-empty."""
    return len(x) != 0
def tuple_bool(x):
    """Implementation of `tuple_bool`: a tuple is truthy when non-empty."""
    return len(x) != 0
def dict_bool(x):
    """Implementation of `dict_bool`: a dict is truthy when non-empty."""
    return len(x) != 0
def none_bool(x):
    """Implementation of `none_bool`: None is always falsy."""
    return False
def func_bool(x):
    """Implementation of `func_bool`: a function object is always truthy."""
    return True
def float_floordiv(x, y):
    """Implementation of `float_floordiv`: the floor of the true division."""
    return floor(x / y)
  1382. #############
  1383. # Iteration #
  1384. #############
@dataclass(frozen=True)
class SequenceIterator:
    """
    SequenceIterator is a util dataclass for iterating sequence object.

    Iterator to use for sequences like List, Array. Immutable: advancing
    produces a new iterator rather than mutating this one.
    """

    # current position within `seq`
    idx: int
    # the sequence being iterated
    seq: list

    @core(ignore_values=True)
    def __ms_hasnext__(self):
        """Whether the index is past the length of the sequence."""
        return self.idx < ms_len(self.seq)

    @core(ignore_values=True)
    def __ms_next__(self):
        """Return the next element and a new iterator."""
        return self.seq[self.idx], SequenceIterator(self.idx + 1, self.seq)
def list_iter(xs):
    """Iterator for List: starts a SequenceIterator at index 0."""
    return SequenceIterator(0, xs)
def array_iter(xs):
    """Iterator for Array: starts a SequenceIterator at index 0."""
    return SequenceIterator(0, xs)
def tuple_next(xs):
    """Next tuple: return the head element and the remaining tail."""
    return xs[0], tail(xs)
  1410. def tuple_hasnext(xs):
  1411. """Whether the tuple is empty or not."""
  1412. return len(xs) > 0
def list_next(xs):
    """Next list: return the head element and the remaining tail."""
    return xs[0], tail(xs)
  1416. def list_hasnext(xs):
  1417. """Whether the list is empty or not."""
  1418. return len(xs) > 0
# pylint: disable=redefined-outer-name
def list_append(self_, item):
    """Append `item` to list `self_` via the `_append` primitive; returns the result."""
    return _append(self_, item)
  1422. #################
  1423. # Array methods #
  1424. #################
def to_array(x):
    """Implementation of `to_array`: delegates to the `__ms_to_array__` protocol."""
    return x.__ms_to_array__()