array_ops.py

# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""array operations, the function docs are adapted from Numpy API."""
import operator

from ..common import dtype as mstype
from ..common import Tensor
from ..ops import operations as P
from ..ops import functional as F
from ..ops import composite as C
from ..ops.primitive import constexpr
from ..nn import Cell

from .utils import _convert_list_tensor_to_tuple_tensor, _expand, _broadcast_to_shape, \
    _check_input_tensor, _broadcast_to, _to_tensor
from .utils_const import _check_axes_range, _check_start_normalize, \
    _raise_type_error, _raise_value_error, _infer_out_shape, _empty, _promote, \
    _check_same_type, _check_axis_valid, _add_unit_axes, _broadcast_tuples, \
    _check_is_float, _check_axis_in_range, _check_axis_type, _canonicalize_axis, \
    _list_comprehensions, _check_element_int, _is_shape_empty, _type_convert, \
    _tuple_getitem, _expanded_shape, _seq_prod, _get_device, _tuple_setitem, \
    _raise_unimplemented_error

# According to the official numpy reference, the dimension of a numpy array
# must be less than 32.
MAX_NUMPY_DIMS = 32


def expand_dims(a, axis):
    """
    Expands the shape of a tensor.

    Inserts a new axis that will appear at the `axis` position in the expanded
    tensor shape.

    Args:
        a (Tensor): Input tensor array.
        axis (Union[int, list(int), tuple(int)]): Position in the expanded axes where
            the new axis is placed.

    Returns:
        Tensor, with the number of dimensions increased at specified axis.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `axis` exceeds `a.ndim`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,2))
        >>> x = np.expand_dims(x,0)
        >>> print(x.shape)
        (1, 2, 2)
    """
    _check_input_tensor(a)
    if not isinstance(axis, (int, tuple, list)):
        _raise_type_error("axis must be tuple, list or int, but got ", axis)
    if isinstance(axis, int):
        return F.expand_dims(a, axis)
    ndim = a.ndim + len(axis)
    axis = _canonicalize_axis(axis, ndim)
    for ax in axis:
        a = F.expand_dims(a, ax)
    return a


def squeeze(a, axis=None):
    """
    Removes single-dimensional entries from the shape of a tensor.

    Args:
        a (Tensor): Input tensor array.
        axis (Union[None, int, list(int), tuple(int)]): The axis or axes to
            squeeze. Default is None.

    Returns:
        Tensor, with all or a subset of the dimensions of length :math:`1` removed.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If a specified axis has shape entry :math:`> 1`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((1,2,2,1))
        >>> x = np.squeeze(x)
        >>> print(x.shape)
        (2, 2)
    """
    _check_input_tensor(a)
    return a.squeeze(axis)


def transpose(a, axes=None):
    """
    Reverses or permutes the axes of a tensor; returns the modified tensor.

    Args:
        a (Tensor): A tensor to be transposed.
        axes (Union[None, tuple, list]): The axes order. If `axes` is `None`,
            the axes order is reversed. Default is `None`.

    Returns:
        Tensor, the transposed tensor array.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If the number of `axes` is not equal to `a.ndim`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((1,2,3))
        >>> x = np.transpose(x)
        >>> print(x.shape)
        (3, 2, 1)
    """
    _check_input_tensor(a)
    return a.transpose(axes)


def rollaxis(x, axis, start=0):
    """
    Rolls the specified axis backwards, until it lies in the given position.
    The positions of the other axes do not change relative to one another.

    Args:
        x (Tensor): A Tensor to be transposed.
        axis (int): The axis to be rolled.
        start (int): Default: 0.
            If :math:`start <= axis`, the axis is rolled back until it lies in
            this position (`start`).
            If :math:`start > axis`, the axis is rolled until it lies before this
            position (`start`).
            If :math:`start < 0`, `start` is normalized as shown in the table below:

            .. table::

                +-------------+------------------+
                | start       | Normalized start |
                +=============+==================+
                | -(x.ndim+1) | raise ValueError |
                +-------------+------------------+
                | -x.ndim     | 0                |
                +-------------+------------------+
                | ...         | ...              |
                +-------------+------------------+
                | -1          | x.ndim-1         |
                +-------------+------------------+
                | ...         | ...              |
                +-------------+------------------+
                | x.ndim      | x.ndim           |
                +-------------+------------------+
                | x.ndim+1    | raise ValueError |
                +-------------+------------------+

    Returns:
        Transposed Tensor. Has the same data type as the original tensor `x`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `axis` or `start` is not an integer, or `x` is not a tensor.
        ValueError: If `axis` is not in the range of :math:`[-ndim, ndim-1]` or
            `start` is not in the range of :math:`[-ndim, ndim]`.

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.rollaxis(x, 0, 2)
        >>> print(output.shape)
        (3, 2, 4)
    """
    _check_input_tensor(x)
    if not isinstance(axis, int):
        _raise_type_error("integer argument expected, but got ", axis)
    if not isinstance(start, int):
        _raise_type_error("integer argument expected, but got ", start)

    shape = F.shape(x)
    ndim = F.tuple_len(shape)

    axis = _check_axes_range(axis, ndim)
    start = _check_start_normalize(start, ndim)
    if start - axis >= 0 and start - axis <= 1:
        return x
    perm = F.make_range(0, ndim)
    new_perm = None
    if start < axis:
        if axis + 1 < ndim:
            new_perm = perm[0:start] + perm[axis:axis+1] + \
                perm[start:axis] + perm[axis+1:]
        else:
            new_perm = perm[0:start] + perm[axis:axis+1] + perm[start:axis]
    if start > axis:
        if start < ndim:
            new_perm = perm[0:axis] + perm[axis+1:start] + \
                perm[axis:axis+1] + perm[start:]
        else:
            new_perm = perm[0:axis] + perm[axis+1:start] + \
                perm[axis:axis+1]

    return F.transpose(x, new_perm)


def swapaxes(x, axis1, axis2):
    """
    Interchanges two axes of a tensor.

    Args:
        x (Tensor): A tensor to be transposed.
        axis1 (int): First axis.
        axis2 (int): Second axis.

    Returns:
        Transposed tensor, has the same data type as the original tensor `x`.

    Raises:
        TypeError: If `axis1` or `axis2` is not an integer, or `x` is not a tensor.
        ValueError: If `axis1` or `axis2` is not in the range of :math:`[-ndim, ndim-1]`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.swapaxes(x, 0, 2)
        >>> print(output.shape)
        (4, 3, 2)
    """
    _check_input_tensor(x)
    return x.swapaxes(axis1, axis2)


def reshape(x, new_shape):
    """
    Reshapes a tensor without changing its data.

    Args:
        x (Tensor): A tensor to be reshaped.
        new_shape (Union[int, list(int), tuple(int)]): The new shape should be
            compatible with the original shape. If the tuple has only one element,
            the result will be a 1-D tensor of that length. One shape dimension
            can be :math:`-1`. In this case, the value is inferred from the length of
            the tensor and remaining dimensions.

    Returns:
        Reshaped Tensor. Has the same data type as the original tensor `x`.

    Raises:
        TypeError: If `new_shape` is not integer, list or tuple, or `x` is not tensor.
        ValueError: If `new_shape` is not compatible with the original shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.asarray([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
        >>> output = np.reshape(x, (3, 2))
        >>> print(output)
        [[-0.1  0.3]
         [ 3.6  0.4]
         [ 0.5 -3.2]]
        >>> output = np.reshape(x, (3, -1))
        >>> print(output)
        [[-0.1  0.3]
         [ 3.6  0.4]
         [ 0.5 -3.2]]
        >>> output = np.reshape(x, (6, ))
        >>> print(output)
        [-0.1  0.3  3.6  0.4  0.5 -3.2]
    """
    _check_input_tensor(x)
    return x.reshape(new_shape)


def ravel(x):
    """
    Returns a contiguous flattened tensor.

    A 1-D tensor, containing the elements of the input, is returned.

    Args:
        x (Tensor): A tensor to be flattened.

    Returns:
        Flattened tensor, has the same data type as the original tensor `x`.

    Raises:
        TypeError: If `x` is not tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.ravel(x)
        >>> print(output.shape)
        (24,)
    """
    _check_input_tensor(x)
    return x.ravel()


@constexpr
def _move_axes_for_concatenate(arr_shape, axis):
    """
    Moves axis 0 to the designated position, while keeping the other axes'
    relative positions unchanged. Only used when a single tensor is
    concatenated.
    """
    original_axes = tuple(range(len(arr_shape)))
    new_axes = original_axes[1:axis+1] + (0,) + original_axes[axis+1:]
    new_shape = arr_shape[1:axis+1] + (arr_shape[0] * arr_shape[axis+1],) + \
        arr_shape[axis+2:]
    return new_axes, new_shape
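

# Illustration (a sketch, not part of the original module): since the helper
# is a @constexpr, it can be traced on plain tuples. For a single tensor of
# shape (3, 4, 5), i.e. three (4, 5) sub-tensors, concatenated along axis=1:
#
#     new_axes, new_shape = _move_axes_for_concatenate((3, 4, 5), 1)
#     # new_axes  == (1, 0, 2)   transpose (3, 4, 5) -> (4, 3, 5)
#     # new_shape == (4, 15)     reshape   (4, 3, 5) -> (4, 15)
#
# which is exactly the result of concatenating the three (4, 5) sub-tensors
# along their axis 1.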


def _promote_type_for_concatenate(tuple_of_tensors):
    """
    Checks dtype for all tensors in the tuple. If dtypes are not the same,
    promotes them to the `highest` dtype in the tuple, so that they are ready
    for the concat operator.

    Args:
        tuple_of_tensors(tuple(tensor)): A tuple of tensors.

    Returns:
        Tuple of tensors, with each tensor promoted to the same dtype.
    """
    need_cast = False
    final_type = tuple_of_tensors[0].dtype
    for tensor in tuple_of_tensors:
        if not _check_same_type(final_type, tensor.dtype):
            need_cast = True
        final_type = _promote(final_type, tensor.dtype)

    if not need_cast:
        return tuple_of_tensors
    tuple_of_casted_tensors = ()
    for tensor in tuple_of_tensors:
        tuple_of_casted_tensors += (tensor.astype(final_type, copy=False),)
    return tuple_of_casted_tensors
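

# Illustration (a hedged sketch, assuming _promote ranks float32 above int32,
# as numpy's promotion rules do): mixing dtypes in a concatenate call promotes
# every operand to a common dtype first, because P.Concat requires uniform
# input dtypes:
#
#     a = Tensor([1, 2], mstype.int32)
#     b = Tensor([1.5, 2.5], mstype.float32)
#     a2, b2 = _promote_type_for_concatenate((a, b))
#     # a2.dtype == b2.dtype == mstype.float32; only `a` needed a cast.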


def concatenate(arrays, axis=0):
    """
    Joins a sequence of tensors along an existing axis.

    Args:
        arrays (Union[Tensor, tuple(Tensor), list(Tensor)]): A tensor or a list
            of tensors to be concatenated.
        axis (Union[None, int], optional): The axis along which the tensors will
            be joined, if `axis` is :class:`None`, tensors are flattened before
            use. Default is 0.

    Returns:
        A tensor concatenated from a tensor or a list of tensors.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `axis` is not in the range of :math:`[-ndim, ndim-1]`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.ones((1,2,3))
        >>> x2 = np.ones((1,2,1))
        >>> x = np.concatenate((x1, x2), axis=-1)
        >>> print(x.shape)
        (1, 2, 4)
    """
    if isinstance(arrays, Tensor):
        # if only one tensor is provided, it is treated as a tuple along the
        # first dimension. For example, a tensor of shape (3,4,5) will be treated
        # as: tuple(tensor_1(4,5), tensor_2(4,5), tensor_3(4,5))
        if axis is None or axis >= MAX_NUMPY_DIMS:
            return ravel(arrays)
        arr_shape = F.shape(arrays)
        _check_axes_range((axis,), len(arr_shape))
        # move axis 0 to the designated position, while keeping the other
        # axes' relative positions unchanged
        new_axes, new_shape = _move_axes_for_concatenate(arr_shape, axis)
        arrays = transpose(arrays, new_axes)
        arrays = reshape(arrays, new_shape)
        return arrays

    flattened_arrays = ()
    if axis is None or axis >= MAX_NUMPY_DIMS:
        for arr in arrays:
            flattened_arrays += (ravel(arr),)
        axis = -1
        flattened_arrays = _promote_type_for_concatenate(flattened_arrays)
        return P.Concat(axis)(flattened_arrays)

    # convert a list of tensors to a tuple of tensors
    arrays = _convert_list_tensor_to_tuple_tensor(arrays)
    arr_shape = F.shape(arrays[0])
    _check_axes_range((axis,), len(arr_shape))

    # if only one tensor is in the tuple/list, return the tensor itself
    if len(arrays) == 1:
        return arrays[0]

    arrays = _promote_type_for_concatenate(arrays)
    return P.Concat(axis)(arrays)


def append(arr, values, axis=None):
    """
    Appends values to the end of a tensor.

    Args:
        arr (Tensor): Values are appended to a copy of this tensor.
        values (Tensor): These values are appended to a copy of `arr`. It must be of
            the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is
            not specified, `values` can be any shape and will be flattened before use.
        axis (None, int, optional): The `axis` along which values are appended. If `axis` is not
            given, both `arr` and `values` are flattened before use, default is :class:`None`.

    Returns:
        Tensor, a copy of tensor with values appended to axis.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If specified axis exceeds `arr.ndim`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones((2, 1))
        >>> print(np.append(a, b, axis=1).shape)
        (2, 4)
    """
    _check_input_tensor(arr)
    _check_input_tensor(values)
    if axis is None:
        arr = arr.ravel()
        values = values.ravel()
    else:
        _check_axis_in_range(axis, arr.ndim)
    if F.rank(arr) != F.rank(values):
        _raise_value_error("all tensors must have same number of dimensions")
    return concatenate((arr, values), axis)


def column_stack(tup):
    """
    Stacks 1-D tensors as columns into a 2-D tensor. 2-D tensors are stacked
    as-is, just like with np.hstack.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. All
            of them must have the same shape except the axis to be concatenated.

    Returns:
        2-D Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('int32')
        >>> x2 = np.array([4, 5, 6]).astype('int32')
        >>> output = np.column_stack((x1, x2))
        >>> print(output)
        [[1 4]
         [2 5]
         [3 6]]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or list or tuple of tensors are required, but got ", tup)

    trans_tup = ()
    for tensor in tup:
        if tensor.ndim < 1:
            tensor = F.expand_dims(tensor, 0)
        if tensor.ndim == 1:
            tensor = F.expand_dims(tensor, 1)
        trans_tup += (tensor,)
    if not trans_tup:
        _raise_value_error("Need at least one tensor to concatenate.")
    return P.Concat(1)(trans_tup)


def vstack(tup):
    """
    Stacks tensors in sequence vertically.
    This is equivalent to concatenation along the first axis. 1-D tensors are first
    reshaped to `(1, N)`, and then concatenated along the first axis.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The tensors
            must have the same shape along all but the first axis. 1-D tensors must
            have the same shape.

    Returns:
        Stacked Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('int32')
        >>> x2 = np.array([4, 5, 6]).astype('int32')
        >>> output = np.vstack((x1, x2))
        >>> print(output)
        [[1 2 3]
         [4 5 6]]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or list or tuple of tensors are required, but got", tup)

    trans_tup = ()
    for tensor in tup:
        if tensor.ndim <= 1:
            tensor = _expand(tensor, 2, 0)
        trans_tup += (tensor,)
    if not trans_tup:
        _raise_value_error("Need at least one tensor to concatenate.")
    return P.Concat(0)(trans_tup)


def hstack(tup):
    """
    Stacks tensors in sequence horizontally.
    This is equivalent to concatenation along the second axis, except for 1-D
    tensors where it concatenates along the first axis.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The
            tensors must have the same shape along all but the second axis, except
            1-D tensors which can be any length.

    Returns:
        Stacked Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('float32')
        >>> x2 = np.array([4, 5, 6]).astype('float32')
        >>> output = np.hstack((x1, x2))
        >>> print(output)
        [1. 2. 3. 4. 5. 6.]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or list or tuple of tensors are required, but got", tup)

    tuple_of_tensor = ()
    for tensor in tup:
        if tensor.ndim < 1:
            tensor = F.expand_dims(tensor, 0)
        tuple_of_tensor += (tensor,)
    if not tuple_of_tensor:
        _raise_value_error("Need at least one tensor to concatenate.")
    if tuple_of_tensor[0].ndim <= 1:
        return P.Concat(0)(tuple_of_tensor)
    return P.Concat(1)(tuple_of_tensor)


def dstack(tup):
    """
    Stacks tensors in sequence depth wise (along the third axis).
    This is equivalent to concatenation along the third axis. 1-D tensors
    :math:`(N,)` should be reshaped to :math:`(1,N,1)`.
    2-D tensors :math:`(M,N)` should be reshaped to :math:`(M,N,1)` before
    concatenation.

    Args:
        tup (Union[Tensor, tuple, list]): A sequence of tensors. The tensors must
            have the same shape along all but the third axis. 1-D or 2-D tensors
            must have the same shape.

    Returns:
        Stacked Tensor, formed by stacking the given tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `tup` is not Tensor, list or tuple.
        ValueError: If `tup` is empty.

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.array([1, 2, 3]).astype('float32')
        >>> x2 = np.array([4, 5, 6]).astype('float32')
        >>> output = np.dstack((x1, x2))
        >>> print(output)
        [[[1. 4.]
          [2. 5.]
          [3. 6.]]]
    """
    if isinstance(tup, Tensor):
        return tup
    if not isinstance(tup, (list, tuple)):
        _raise_type_error("Tensor or list or tuple of tensors are required, but got", tup)

    trans_tup = ()
    for tensor in tup:
        if tensor.ndim <= 1:
            tensor = _expand(tensor, 2, 0)
        if tensor.ndim == 2:
            tensor = F.expand_dims(tensor, 2)
        trans_tup += (tensor,)
    if not trans_tup:
        _raise_value_error("Need at least one tensor to concatenate.")
    return P.Concat(2)(trans_tup)


def where(condition, x=None, y=None):
    """
    Returns elements chosen from `x` or `y` depending on `condition`.

    Note:
        As nonzero is not supported, neither `x` nor `y` can be None.

    Args:
        condition (Tensor): Where True, yield `x`, otherwise yield `y`.
        x (Tensor): Values from which to choose.
        y (Tensor): Values from which to choose. `x`, `y` and `condition` need
            to be broadcastable to some shape.

    Returns:
        Tensor or scalar, with elements from `x` where `condition` is True, and
        elements from `y` elsewhere.

    Raises:
        ValueError: If operands cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> condition = np.full((1, 1, 2), [False, True])
        >>> x = np.full((1, 3, 2), 5)
        >>> y = np.full((2, 1, 1), 7)
        >>> output = np.where(condition, x, y)
        >>> print(output)
        [[[7 5]
          [7 5]
          [7 5]]
         [[7 5]
          [7 5]
          [7 5]]]
    """
    # type promotes input tensors
    dtype1 = F.dtype(x)
    dtype2 = F.dtype(y)
    dtype = _promote(dtype1, dtype2)
    if not _check_same_type(dtype1, dtype):
        x = F.cast(x, dtype)
    if not _check_same_type(dtype2, dtype):
        y = F.cast(y, dtype)
    is_bool = _check_same_type(dtype1, mstype.bool_) and _check_same_type(
        dtype2, mstype.bool_)
    if is_bool:
        # select does not support bool type for x or y
        x = F.cast(x, mstype.float32)
        y = F.cast(y, mstype.float32)

    # broadcasts input tensors
    shape_out = _infer_out_shape(F.shape(condition),
                                 F.shape(x), F.shape(y))
    if not _check_same_type(F.dtype(condition), mstype.float32):
        # tiling with bool is not supported on GPU
        condition = F.cast(condition, mstype.float32)
    condition = _broadcast_to_shape(condition, shape_out)
    x = _broadcast_to_shape(x, shape_out)
    y = _broadcast_to_shape(y, shape_out)

    if not _check_same_type(F.dtype(condition), mstype.bool_):
        condition = F.cast(condition, mstype.bool_)
    res = F.select(condition, x, y)
    if is_bool:
        res = F.cast(res, mstype.bool_)
    return res
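

# Usage note (a hedged sketch): the casts above exist because F.select rejects
# bool operands and bool tiling is unsupported on GPU, so boolean `x`/`y`
# round-trip through float32 and a non-float `condition` is cast to float32
# before broadcasting. A minimal call, mirroring numpy semantics:
#
#     cond = _to_tensor([True, False])
#     out = where(cond, _to_tensor([1, 2]), _to_tensor([3, 4]))
#     # out == [1 4]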


def _atleast_xd(ndim, arys):
    """Returns arys with at least ndim."""
    _check_input_tensor(*arys)
    res = []
    for arr in arys:
        arr = _expand(arr, ndim)
        res.append(arr)
    if len(res) == 1:
        return res[0]
    return res


def atleast_1d(*arys):
    """
    Converts inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Note:
        In graph mode, returns a tuple of tensor instead of a list of
        tensors.

    Args:
        *arys (Tensor): One or more input tensors.

    Returns:
        Tensor, or list of tensors, each with ``a.ndim >= 1``.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_1d(a, b, c)
        >>> print(output)
        [Tensor(shape=[2, 3], dtype=Float32, value=
        [[1.00000000e+000, 1.00000000e+000, 1.00000000e+000],
         [1.00000000e+000, 1.00000000e+000, 1.00000000e+000]]),
         Tensor(shape=[1], dtype=Float32, value= [1.00000000e+000]),
         Tensor(shape=[5], dtype=Float32,
         value= [1.00000000e+000, 1.00000000e+000, 1.00000000e+000,
         1.00000000e+000, 1.00000000e+000])]
    """
    return _atleast_xd(1, arys)


def atleast_2d(*arys):
    """
    Reshapes inputs as arrays with at least two dimensions.

    Note:
        In graph mode, returns a tuple of tensor instead of a list of
        tensors.

    Args:
        *arys (Tensor): One or more input tensors.

    Returns:
        Tensor, or list of tensors, each with ``a.ndim >= 2``.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_2d(a, b, c)
        >>> print(output)
        [Tensor(shape=[2, 3], dtype=Float32, value=
        [[1.00000000e+000, 1.00000000e+000, 1.00000000e+000],
         [1.00000000e+000, 1.00000000e+000, 1.00000000e+000]]),
         Tensor(shape=[1, 1], dtype=Float32, value= [[1.00000000e+000]]),
         Tensor(shape=[1, 5], dtype=Float32,
         value= [[1.00000000e+000, 1.00000000e+000, 1.00000000e+000,
         1.00000000e+000, 1.00000000e+000]])]
    """
    return _atleast_xd(2, arys)


def atleast_3d(*arys):
    """
    Reshapes inputs as arrays with at least three dimensions.

    Note:
        In graph mode, returns a tuple of tensor instead of a list of
        tensors.

    Args:
        *arys (Tensor): One or more input tensors.

    Returns:
        Tensor, or list of tensors, each with ``a.ndim >= 3``. For example,
        a 1-D array of shape `(N,)` becomes a tensor of shape `(1, N, 1)`, and
        a 2-D array of shape `(M, N)` becomes a tensor of shape `(M, N, 1)`.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_3d(a, b, c)
        >>> print(output)
        [Tensor(shape=[2, 3, 1], dtype=Float32, value=
        [[[1.00000000e+000], [1.00000000e+000], [1.00000000e+000]],
         [[1.00000000e+000], [1.00000000e+000], [1.00000000e+000]]]),
         Tensor(shape=[1, 1, 1], dtype=Float32, value= [[[1.00000000e+000]]]),
         Tensor(shape=[1, 5, 1], dtype=Float32,
         value= [[[1.00000000e+000], [1.00000000e+000], [1.00000000e+000],
         [1.00000000e+000], [1.00000000e+000]]])]
    """
    res = []
    for arr in arys:
        ndim = F.rank(arr)
        if ndim == 0:
            arr = F.reshape(arr, (1, 1, 1))
        elif ndim == 1:
            arr = F.reshape(arr, (1, F.size(arr), 1))
        elif ndim == 2:
            arr = F.reshape(arr, F.shape(arr) + (1,))
        res.append(arr)
    if len(res) == 1:
        return res[0]
    return res


def stack(arrays, axis=0):
    """
    Joins a sequence of arrays along a new axis.

    The `axis` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the
    first dimension and if ``axis=-1`` it will be the last dimension.

    Note:
        Numpy argument `out` is not supported.

    Args:
        arrays (sequence of Tensor): Each array must have the same shape.
        axis (int, optional): The axis in the result array along which the
            input arrays are stacked. Default: 0.

    Returns:
        Tensor, the stacked array has one more dimension than the input
        arrays.

    Raises:
        ValueError: If input is not Tensor, tuple, or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arrays = [np.ones((3, 4)) for _ in range(10)]
        >>> output = np.stack(arrays, axis=0)
        >>> print(output.shape)
        (10, 3, 4)
        >>> output = np.stack(arrays, axis=1)
        >>> print(output.shape)
        (3, 10, 4)
        >>> output = np.stack(arrays, axis=2)
        >>> print(output.shape)
        (3, 4, 10)
    """
    if isinstance(arrays, Tensor):
        shape = F.shape(arrays)
        ndim = F.rank(arrays)
        axis = axis % ndim
        axes = F.make_range(ndim)
        perm = axes[1:axis+1] + (0,) + axes[axis+1:]
        if _is_shape_empty(shape):
            return _empty(mstype.float32, shape[1:axis+1] + (shape[0],) + shape[axis+1:])
        return transpose(arrays, perm)

    if isinstance(arrays, (list, tuple)):
        shape = (len(arrays),) + F.shape(arrays[0])
        ndim = len(shape)
        axis = axis % ndim
        if _is_shape_empty(shape):
            return _empty(mstype.float32, shape[1:axis+1] + (shape[0],) + shape[axis+1:])
        seq = ()
        for arr in arrays:
            seq += (F.expand_dims(arr, axis),)
        return concatenate(seq, axis)
    return _raise_value_error('input arrays must be Tensor, tuple, or list')


class UniqueNet(Cell):
    """The operation is wrapped inside a model."""

    def __init__(self):
        super(UniqueNet, self).__init__()
        self.unique = P.Unique()

    def construct(self, x):
        return self.unique(x)


def unique(x, return_inverse=False):
    """
    Finds the unique elements of a tensor. The input tensor will be flattened
    first when it has more than one dimension.

    Note:
        Numpy arguments `axis`, `return_index` and `return_counts` are not supported.
        On CPU, this operator must be executed in graph mode.

    Args:
        x (Tensor): The input tensor to be processed.
        return_inverse (bool): If `True`, also return the indices of the unique tensor.
            Default: `False`.

    Returns:
        Tensor or tuple of Tensors.

        - If `return_inverse` is `False`, just return the unique tensor.
        - If `return_inverse` is `True`, return tuple of tensors.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If `x` is not tensor.

    Examples:
        >>> import mindspore.numpy as np
        >>> from mindspore import context
        >>> context.set_context(mode=context.GRAPH_MODE)
        >>> input_x = np.asarray([1, 2, 2, 2, 3, 4, 5]).astype('int32')
        >>> output_x = np.unique(input_x)
        >>> print(output_x)
        [1 2 3 4 5]
        >>> output_x = np.unique(input_x, return_inverse=True)
        >>> print(output_x)
        (Tensor(shape=[5], dtype=Int32, value= [ 1, 2, 3, 4, 5]), Tensor(shape=[7], dtype=Int32,
        value= [0, 1, 1, 1, 2, 3, 4]))
    """
    _check_input_tensor(x)
    if F.tuple_len(F.shape(x)) > 1:
        x = ravel(x)
    uniq = UniqueNet()
    res = uniq(x)
    if not return_inverse:
        return res[0]
    return res


def roll_along_axis(a, shift, axis):
    """
    Rolls a tensor along a given axis. This is a helper function of np.roll.

    Args:
        a (Tensor): Input tensor.
        shift (int): The number of places the tensor is shifted.
        axis (int): The designated axis for shifting.

    Returns:
        Shifted tensor.
    """
    _check_axis_in_range(axis, a.ndim)
    _check_element_int((shift, axis))
    if axis < 0:
        axis += a.ndim
    shift = -(shift % a.shape[axis])
    # if shift is 0, we do not need to roll at all
    if shift == 0:
        return a
    begin1 = ()
    begin2 = ()
    end1 = ()
    end2 = ()
    stride = _list_comprehensions(a.ndim, 1, True)
    for i in F.make_range(a.ndim):
        if i != axis:
            begin1 += (0,)
            end1 += (a.shape[i],)
            begin2 += (0,)
            end2 += (a.shape[i],)
        else:
            begin1 += (shift,)
            end1 += (a.shape[i],)
            begin2 += (0,)
            end2 += (shift,)
    return append(F.strided_slice(a, begin1, end1, stride),
                  F.strided_slice(a, begin2, end2, stride), axis=axis)
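

# Illustration (a sketch, not part of the original module): the roll is built
# from two strided slices that are appended back together. For a 1-D tensor
# a = [0, 1, 2, 3, 4] and shift=2 along axis 0, the normalized shift is
# -(2 % 5) == -2, so the result is
#
#     append(a[-2:], a[:-2])    # -> [3, 4, 0, 1, 2]
#
# matching np.roll([0, 1, 2, 3, 4], 2).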


def roll(a, shift, axis=None):
    """
    Rolls a tensor along given axes.

    Elements that roll beyond the last position are re-introduced at the first.

    Args:
        a (Tensor): Input tensor.
        shift (Union[int, tuple(int)]): The number of places by which elements are
            shifted. If a tuple, then `axis` must be a tuple of the same size, and
            each of the given axes is shifted by the corresponding number. If `shift`
            is an int while `axis` is a tuple of ints, then the same value is used
            for all given axes.
        axis (Union[int, tuple(int)], optional): Axis or axes along which elements
            are shifted. By default, the array is flattened before shifting, after
            which the original shape is restored.

    Returns:
        Tensor, with the same shape as `a`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `axis` exceeds `a.ndim`, or `shift` and `axis` cannot broadcast.

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.reshape(np.arange(12), (3, 4))
        >>> print(np.roll(a, [2,-3], [0,-1]))
        [[ 7  4  5  6]
         [11  8  9 10]
         [ 3  0  1  2]]
    """
    _check_input_tensor(a)
    original_shape = a.shape
    original_dtype = a.dtype
    restore_shape = False
    # F.strided_slice only supports float on cpu, this will change once more
    # supports are added.
    if not _check_is_float(original_dtype):
        a = a.astype(mstype.float32)
    if axis is None:
        restore_shape = True
        axis = 0
        a = a.ravel()
    # Broadcast shift and axis to the same length
    shift, axis = _broadcast_tuples(shift, axis)
    for shift_each, axis_each in zip(shift, axis):
        a = roll_along_axis(a, shift_each, axis_each)
    if restore_shape:
        a = a.reshape(original_shape)
    if not _check_is_float(original_dtype):
        a = a.astype(original_dtype)
    return a


@constexpr
def _get_moved_perm(ndim, source, destination):
    """
    Helper function for moveaxis, returns permutation after moving axes
    from source to destination.
    """
    dest_sorted_idx = [i for i, _ in sorted(enumerate(destination),
                                            key=operator.itemgetter(1))]
    axes_orig = [i for i in range(ndim) if i not in source]

    k = 0
    m = 0
    perm = []
    for i in dest_sorted_idx:
        # inserts an axis that has been moved, denoted by n, and axes that remain
        # in their original position, indexed from k to k + n - m, into index m in
        # the list of permuted axes
        n = destination[i]
        j = k + n - m
        perm += axes_orig[k:j]
        perm.append(source[i])
        k += n - m
        m = n + 1
    perm += axes_orig[k:]
    return tuple(perm)


@constexpr
def _get_moved_shape(shape, perm):
    """
    Helper function for moveaxis, returns the permuted shape after
    applying perm.
    """
    return tuple([shape[i] for i in perm])
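

# Illustration (a sketch, not part of the original module): for
# moveaxis(x, 0, -1) on a rank-3 tensor, the axes are normalized to
# source=(0,) and destination=(2,); since both helpers are @constexpr,
# they can be traced on plain tuples:
#
#     perm = _get_moved_perm(3, (0,), (2,))    # (1, 2, 0)
#     _get_moved_shape((3, 4, 5), perm)        # (4, 5, 3)
#
# which matches the first moveaxis docstring example below.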


def moveaxis(a, source, destination):
    """
    Moves axes of an array to new positions.

    Other axes remain in their original order.

    Args:
        a (Tensor): The array whose axes should be reordered.
        source (int or sequence of ints): Original positions of the
            axes to move. These must be unique.
        destination (int or sequence of ints): Destination positions
            for each of the original axes. These must also be unique.

    Returns:
        Tensor, array with moved axes.

    Raises:
        ValueError: If axes are out of the range of ``[-a.ndim, a.ndim)``, or
            if the axes contain duplicates.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.zeros((3, 4, 5))
        >>> output = np.moveaxis(x, 0, -1)
        >>> print(output.shape)
        (4, 5, 3)
        >>> output = np.moveaxis(x, -1, 0)
        >>> print(output.shape)
        (5, 3, 4)
        >>> output = np.moveaxis(x, [0, 1, 2], [-1, -2, -3])
        >>> print(output.shape)
        (5, 4, 3)
    """
    ndim = F.rank(a)
    source = _check_axis_valid(source, ndim)
    destination = _check_axis_valid(destination, ndim)
    if len(source) != len(destination):
        _raise_value_error('`source` and `destination` arguments must have the same number of elements')
    perm = _get_moved_perm(ndim, source, destination)

    shape = F.shape(a)
    if _is_shape_empty(shape):
        return _empty(F.dtype(a), _get_moved_shape(shape, perm))

    return F.transpose(a, perm)


def tile(a, reps):
    """
    Constructs an array by repeating `a` the number of times given by `reps`.

    If `reps` has length `d`, the result will have dimension of ``max(d, a.ndim)``.
    If ``a.ndim < d``, `a` is promoted to be d-dimensional by prepending new axes.
    So a shape (3,) array is promoted to (1, 3) for 2-D replication, or
    shape (1, 1, 3) for 3-D replication. If this is not the desired behavior,
    promote `a` to d-dimensions manually before calling this function.
    If ``a.ndim > d``, `reps` is promoted to ``a.ndim`` by prepending 1s to it.
    Thus for an `a` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
    (1, 1, 2, 2).

    Args:
        a (Tensor): The input array.
        reps (int or sequence of ints): The number of repetitions of `a` along
            each axis.

    Returns:
        Tensor, the tiled output array.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([0, 1, 2])
        >>> output = np.tile(a, 2)
        >>> print(output)
        [0 1 2 0 1 2]
        >>> output = np.tile(a, (2, 2))
        >>> print(output)
        [[0 1 2 0 1 2]
         [0 1 2 0 1 2]]
        >>> output = np.tile(a, (2, 1, 2))
        >>> print(output)
        [[[0 1 2 0 1 2]]
         [[0 1 2 0 1 2]]]
    """
    _check_input_tensor(a)
    ndim = F.rank(a)
    shape = F.shape(a)
    reps = _add_unit_axes(reps, ndim)
    if _is_shape_empty(shape) or _is_shape_empty(reps):
        shape = _add_unit_axes(shape, len(reps))
        return _empty(F.dtype(a), _seq_prod(shape, reps))
    return F.tile(a, reps)


@constexpr
def _check_can_broadcast_to(shape, target_shape):
    """Determines if shape can be broadcast to target_shape."""
    ndim = len(shape)
    ndim_target = len(target_shape)
    if ndim > ndim_target:
        return False
    for i, j in zip(reversed(shape), reversed(target_shape)):
        if i not in (1, j):
            return False
    return True
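

# Illustration (a sketch, not part of the original module): shapes are
# compared right-aligned, and every trailing dimension must be either 1 or
# equal to the target dimension; being a @constexpr, the check can be traced
# on plain tuples:
#
#     _check_can_broadcast_to((3,), (3, 3))      # True  (broadcast_to example)
#     _check_can_broadcast_to((2, 1), (2, 3))    # True  (1 stretches to 3)
#     _check_can_broadcast_to((2,), (2, 3))      # False (2 != 3 right-aligned)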


def broadcast_to(array, shape):
    """
    Broadcasts an array to a new shape.

    Args:
        array (Tensor): The array to broadcast.
        shape (tuple): The shape of the desired array.

    Returns:
        Tensor, original array broadcast to the given shape.

    Raises:
        ValueError: If array cannot be broadcast to shape.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([1, 2, 3])
        >>> output = np.broadcast_to(x, (3, 3))
        >>> print(output)
        [[1 2 3]
         [1 2 3]
         [1 2 3]]
    """
    shape_a = F.shape(array)
    if not _check_can_broadcast_to(shape_a, shape):
        return _raise_value_error('cannot broadcast with ', shape)
    return _broadcast_to_shape(array, shape)


def broadcast_arrays(*args):
    """
    Broadcasts any number of arrays against each other.

    Note:
        Numpy argument `subok` is not supported.
        In graph mode, returns a tuple of Tensor instead of a list
        of Tensor.

    Args:
        *args (Tensor): The arrays to broadcast.

    Returns:
        List of Tensor.

    Raises:
        ValueError: If arrays cannot be broadcast.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.array([[1,2,3]])
        >>> y = np.array([[4],[5]])
        >>> output = np.broadcast_arrays(x, y)
        >>> print(output)
        [Tensor(shape=[2, 3], dtype=Int32, value=
        [[1, 2, 3],
         [1, 2, 3]]), Tensor(shape=[2, 3], dtype=Int32, value=
        [[4, 4, 4],
         [5, 5, 5]])]
    """
    shapes = map(F.shape, args)
    out_shape = _infer_out_shape(*shapes)
    res = []
    for arr in args:
        res.append(broadcast_to(arr, out_shape))
    return res


def array_split(x, indices_or_sections, axis=0):
    """
    Splits a tensor into multiple sub-tensors.

    Note:
        Currently, array_split only supports :class:`mindspore.float32` on ``CPU``.

        The only difference between ``np.split`` and ``np.array_split`` is that
        ``np.array_split`` allows `indices_or_sections` to be an integer that does
        not equally divide the axis. For a tensor of length :math:`l` that should
        be split into :math:`n` sections, it returns :math:`l % n` sub-arrays of
        size :math:`l//n + 1` and the rest of size :math:`l//n`.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` tensors along axis.
            If tuple(int), list(int) or of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]` and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.
        axis (int): The axis along which to split. Default: 0.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer,
            tuple(int) or list(int) or argument `axis` is not integer.
        ValueError: If argument `axis` is out of range of :math:`[-x.ndim, x.ndim)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(9).astype("float32")
        >>> output = np.array_split(input_x, 4)
        >>> print(output)
        (Tensor(shape=[3], dtype=Float32,
        value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
        Tensor(shape=[2], dtype=Float32,
        value= [ 3.00000000e+00, 4.00000000e+00]),
        Tensor(shape=[2], dtype=Float32,
        value= [ 5.00000000e+00, 6.00000000e+00]),
        Tensor(shape=[2], dtype=Float32,
        value= [ 7.00000000e+00, 8.00000000e+00]))
    """
    return _split(x, indices_or_sections, opname="array_split", axis=axis)


def split(x, indices_or_sections, axis=0):
    """
    Splits a tensor into multiple sub-tensors along the given axis.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along axis.
            If tuple(int), list(int) or of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]` and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.
        axis (int): The axis along which to split. Default: 0.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer,
            tuple(int) or list(int) or argument `axis` is not integer.
        ValueError: If argument `axis` is out of range of :math:`[-x.ndim, x.ndim)`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(9).astype("float32")
        >>> output = np.split(input_x, 3)
        >>> print(output)
        (Tensor(shape=[3], dtype=Float32,
        value= [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]),
        Tensor(shape=[3], dtype=Float32,
        value= [ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]),
        Tensor(shape=[3], dtype=Float32,
        value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
    """
    return _split(x, indices_or_sections, opname="split", axis=axis)


def _split(x, indices_or_sections, opname, axis=0):
    """Splits a tensor based on ``np.split`` or ``np.array_split``."""
    _check_input_tensor(x)
    _ = _check_axis_type(axis, True, False, False)
    axis = _canonicalize_axis(axis, x.ndim)
    res = None
    arr_shape = x.shape
    length_along_dim = arr_shape[axis]
    if isinstance(indices_or_sections, int):
        if opname == "split" or length_along_dim % indices_or_sections == 0:
            res = P.Split(axis, indices_or_sections)(x)
        else:
            # for array_split, carve the "long" sub-tensors off the front,
            # then split the remainder evenly
            num_long_tensor = length_along_dim % indices_or_sections
            num_short_tensor = indices_or_sections - num_long_tensor
            length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
            length2 = length_along_dim - length1
            start1 = _list_comprehensions(F.rank(x), 0, True)
            size1 = _tuple_setitem(arr_shape, axis, length1)
            start2 = _tuple_setitem(start1, axis, length1)
            size2 = _tuple_setitem(arr_shape, axis, length2)
            res = P.Split(axis, num_long_tensor)(F.tensor_slice(x, start1, size1)) + \
                P.Split(axis, num_short_tensor)(F.tensor_slice(x, start2, size2))
    elif isinstance(indices_or_sections, (list, tuple)) and _check_element_int(indices_or_sections):
        res = _split_sub_tensors(x, indices_or_sections, axis)
    else:
        _raise_type_error("Argument `indices_or_sections` in `mindspore.numpy.split` "
                          "should be integer, tuple(int) or list(int), but got",
                          indices_or_sections)
    return res
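

# Illustration (a sketch, not part of the original module): for array_split of
# a length-9 tensor into 4 sections, length_along_dim % indices_or_sections
# == 1, so one "long" sub-tensor of size 9 // 4 + 1 == 3 is carved off the
# front and the remaining 6 elements are split into three "short" sub-tensors
# of size 2, reproducing the (3, 2, 2, 2) sizes in the array_split docstring.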


def _split_sub_tensors(x, indices, axis):
    """
    Splits the input tensor `x` into multiple sub-tensors
    along the axis according to the given indices.
    """
    if isinstance(indices, list):
        indices.append(x.shape[axis])
    elif isinstance(indices, tuple):
        indices += (x.shape[axis],)

    sub_tensors = []
    strides = _list_comprehensions(x.ndim, 1, True)
    begin = _list_comprehensions(x.ndim, 0)
    end = _list_comprehensions(x.shape)
    for i, idx in enumerate(indices):
        begin[axis] = 0 if i == 0 else indices[i-1]
        end[axis] = idx
        sliced_tensor = F.strided_slice(x, _type_convert(tuple, begin), _type_convert(tuple, end), strides)
        sub_tensors.append(sliced_tensor)
    return sub_tensors
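

# Illustration (a sketch, not part of the original module): for x of shape
# (9,) and indices (2, 3), the end index x.shape[0] == 9 is appended, giving
# the slices x[0:2], x[2:3] and x[3:9], i.e. the [2, 3] example described in
# the split docstrings above.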


def vsplit(x, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors vertically (row-wise).
    It is equivalent to ``split`` with :math:`axis=0` (default); the array is
    always split along the first axis regardless of the array dimension.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along axis.
            If tuple(int), list(int) or of sorted integers,
            the entries indicate where along axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=0`, result in
            three sub-tensors :math:`x[:2]`, :math:`x[2:3]` and :math:`x[3:]`.
            If an index exceeds the dimension of the array along axis,
            an empty sub-array is returned correspondingly.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(9).reshape((3, 3)).astype('float32')
        >>> output = np.vsplit(input_x, 3)
        >>> print(output)
        (Tensor(shape=[1, 3], dtype=Float32,
        value=[[ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]]),
        Tensor(shape=[1, 3], dtype=Float32,
        value=[[ 3.00000000e+00, 4.00000000e+00, 5.00000000e+00]]),
        Tensor(shape=[1, 3], dtype=Float32,
        value=[[ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]]))
    """
    return split(x, indices_or_sections, 0)


def hsplit(x, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors horizontally (column-wise).
    It is equivalent to split with :math:`axis=1`; the array is always
    split along the second axis regardless of the array dimension.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along the axis.
            If tuple(int) or list(int) of sorted integers,
            the entries indicate where along the axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=1`, result in
            three sub-tensors :math:`x[:, :2]`, :math:`x[:, 2:3]` and :math:`x[:, 3:]`.
            If an index exceeds the dimension of the array along the axis,
            an empty sub-array is returned correspondingly.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer,
            tuple(int) or list(int).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(6).reshape((2, 3)).astype('float32')
        >>> output = np.hsplit(input_x, 3)
        >>> print(output)
        (Tensor(shape=[2, 1], dtype=Float32,
        value=[[ 0.00000000e+00],
        [ 3.00000000e+00]]),
        Tensor(shape=[2, 1], dtype=Float32,
        value=[[ 1.00000000e+00],
        [ 4.00000000e+00]]),
        Tensor(shape=[2, 1], dtype=Float32,
        value=[[ 2.00000000e+00],
        [ 5.00000000e+00]]))
    """
    return split(x, indices_or_sections, 1)


def dsplit(x, indices_or_sections):
    """
    Splits a tensor into multiple sub-tensors along the 3rd axis (depth).
    It is equivalent to split with :math:`axis=2`; the array is always
    split along the third axis regardless of the array dimension.

    Args:
        x (Tensor): A Tensor to be divided.
        indices_or_sections (Union[int, tuple(int), list(int)]):
            If integer, :math:`N`, the tensor will be divided into
            :math:`N` equal tensors along the axis.
            If tuple(int) or list(int) of sorted integers,
            the entries indicate where along the axis the array is split.
            For example, :math:`[2, 3]` would, for :math:`axis=2`, result in
            three sub-tensors :math:`x[:, :, :2]`, :math:`x[:, :, 2:3]` and :math:`x[:, :, 3:]`.
            If an index exceeds the dimension of the array along the axis,
            an empty sub-array is returned correspondingly.

    Returns:
        A list of sub-tensors.

    Raises:
        TypeError: If argument `indices_or_sections` is not integer,
            tuple(int) or list(int).

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> input_x = np.arange(6).reshape((1, 2, 3)).astype('float32')
        >>> output = np.dsplit(input_x, 3)
        >>> print(output)
        (Tensor(shape=[1, 2, 1], dtype=Float32,
        value=[[[ 0.00000000e+00],
        [ 3.00000000e+00]]]),
        Tensor(shape=[1, 2, 1], dtype=Float32,
        value=[[[ 1.00000000e+00],
        [ 4.00000000e+00]]]),
        Tensor(shape=[1, 2, 1], dtype=Float32,
        value=[[[ 2.00000000e+00],
        [ 5.00000000e+00]]]))
    """
    return split(x, indices_or_sections, 2)
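
# For reference, the three convenience wrappers above map onto `split` as:
#   vsplit(x, s) == split(x, s, axis=0)   (rows)
#   hsplit(x, s) == split(x, s, axis=1)   (columns)
#   dsplit(x, s) == split(x, s, axis=2)   (depth)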


@constexpr
def _get_flip_start(ndim, shape, axes):
    # begin at the last element along each flipped axis, at 0 elsewhere
    return tuple([shape[i] - 1 if i in axes else 0 for i in range(ndim)])


@constexpr
def _get_flip_end(ndim, shape, axes):
    # end values that lie one past the first/last element so that
    # strided_slice traverses the full extent of every axis
    return tuple([-shape[i] - 1 if i in axes else shape[i] + 1 for i in range(ndim)])


@constexpr
def _get_flip_strides(ndim, axes):
    # step backwards (-1) along flipped axes, forwards (+1) elsewhere
    return tuple([-1 if i in axes else 1 for i in range(ndim)])
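
# Worked example (for reference): for ``shape = (2, 3)`` and ``axes = (1,)``
# the helpers above return start = (0, 2), end = (3, -4) and strides = (1, -1),
# which is the strided-slice form of the reversed slice ``m[:, ::-1]``.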


def flip(m, axis=None):
    """
    Reverses the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    Note:
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        m (Tensor): Input array.
        axis (None or int or tuple of ints, optional): Axis or axes along which
            to flip over. The default, ``axis=None``, will flip over all of the axes
            of the input array. If `axis` is negative it counts from the last to
            the first axis. If `axis` is a tuple of ints, flipping is performed on
            all of the axes specified in the tuple.

    Returns:
        Tensor, with the entries of `axis` reversed.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2, 2, 2))
        >>> output = np.flip(A)
        >>> print(output)
        [[[7., 6.],
          [5., 4.]],
         [[3., 2.],
          [1., 0.]]]
        >>> output = np.flip(A, (0, 2))
        >>> print(output)
        [[[5., 4.],
          [7., 6.]],
         [[1., 0.],
          [3., 2.]]]
    """
    _check_input_tensor(m)
    ndim = F.rank(m)
    axes = _check_axis_valid(axis, ndim)
    shape = F.shape(m)
    dtype = F.dtype(m)
    if _is_shape_empty(shape):
        return m
    # non-float inputs are computed in float32 and cast back afterwards
    if not _check_is_float(dtype):
        m = m.astype(mstype.float32)
    start = _get_flip_start(ndim, shape, axes)
    end = _get_flip_end(ndim, shape, axes)
    strides = _get_flip_strides(ndim, axes)
    res = F.strided_slice(m, start, end, strides)
    if not _check_same_type(F.dtype(res), dtype):
        res = F.cast(res, dtype)
    return res
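
# Worked example (for reference): flipping every axis of a tensor of shape
# (2, 2, 2) runs strided_slice with start = (1, 1, 1), end = (-3, -3, -3)
# and strides = (-1, -1, -1), i.e. the equivalent of ``m[::-1, ::-1, ::-1]``.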


def flipud(m):
    """
    Flips the entries in each column in the up/down direction.
    Rows are preserved, but appear in a different order than before.

    Note:
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        m (Tensor): Input array.

    Returns:
        Tensor.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2, 2, 2))
        >>> output = np.flipud(A)
        >>> print(output)
        [[[4., 5.],
          [6., 7.]],
         [[0., 1.],
          [2., 3.]]]
    """
    return flip(m, 0)


def fliplr(m):
    """
    Flips the entries in each row in the left/right direction.
    Columns are preserved, but appear in a different order than before.

    Note:
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        m (Tensor): Input array.

    Returns:
        Tensor.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> A = np.arange(8.0).reshape((2, 2, 2))
        >>> output = np.fliplr(A)
        >>> print(output)
        [[[2., 3.],
          [0., 1.]],
         [[6., 7.],
          [4., 5.]]]
    """
    return flip(m, 1)


def take_along_axis(arr, indices, axis):
    """
    Takes values from the input array by matching 1d index and data slices.

    This iterates over matching 1d slices oriented along the specified axis in the
    index and data arrays, and uses the former to look up values in the latter.
    These slices can be different lengths.

    Args:
        arr (Tensor): Source array with shape `(Ni…, M, Nk…)`.
        indices (Tensor): Indices with shape `(Ni…, J, Nk…)` to take along each 1d
            slice of `arr`. This must match the dimension of `arr`, but dimensions `Ni`
            and `Nk` only need to broadcast against `arr`.
        axis (int): The axis to take 1d slices along. If `axis` is None, the input
            array is treated as if it had first been flattened to 1d.

    Returns:
        Tensor, the indexed result, with shape `(Ni…, J, Nk…)`.

    Raises:
        ValueError: if input array and indices have different number of dimensions.
        TypeError: if the input is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(12).reshape(3, 4)
        >>> indices = np.arange(3).reshape(1, 3)
        >>> output = np.take_along_axis(x, indices, 1)
        >>> print(output)
        [[ 0  1  2]
         [ 4  5  6]
         [ 8  9 10]]
    """
    _check_input_tensor(arr, indices)
    if axis is None:
        arr = ravel(arr)
        axis = 0
    ndim = F.rank(arr)
    if ndim != F.rank(indices):
        _raise_value_error('`indices` and `arr` must have the same number of dimensions')
    _check_axis_in_range(axis, ndim)
    axis = axis + ndim if axis < 0 else axis
    shape_arr = F.shape(arr)
    shape_indices = F.shape(indices)
    # broadcasts indices against the shape of arr except at axis
    indices = _broadcast_to(indices, _tuple_getitem(shape_indices, axis, False),
                            _tuple_getitem(shape_arr, axis, False), ndim)
    indices = _broadcast_to(indices, _tuple_getitem(shape_arr, axis + 1, False) +
                            _tuple_getitem(shape_indices, axis + 1), shape_arr, ndim)
    return F.gather_d(arr, axis, indices)
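
# Worked example (for reference): in the docstring example above, `indices`
# of shape (1, 3) is first broadcast against `arr` everywhere except `axis`,
# giving shape (3, 3); F.gather_d then picks element indices[i, j] out of
# row i of `arr`, producing the (3, 3) result.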


def _mod(x, y):
    """Computes x mod y."""
    quotient = F.tensor_floordiv(x, y)
    prod = F.tensor_mul(y, quotient)
    return F.tensor_sub(x, prod)
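
# Worked example (for reference): because tensor_floordiv rounds towards
# negative infinity, _mod(-1, 6) = -1 - 6 * (-1) = 5, which is how 'wrap'
# mode below maps a negative index like -1 to the valid position 5.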


def _check_indices(size, indices, mode):
    """Checks whether indices are out of bounds."""
    shape = F.shape(indices)
    dtype = F.dtype(indices)
    lowerbounds = F.fill(dtype, shape, -size)
    upperbounds = F.fill(dtype, shape, size - 1)
    out_of_lowerbounds = F.tensor_lt(indices, lowerbounds)
    out_of_upperbounds = F.tensor_gt(indices, upperbounds)
    if mode == 'raise':
        _raise_unimplemented_error('"raise" mode is not implemented')
    if mode == 'wrap':
        return _mod(indices, F.fill(dtype, shape, size))
    # 'clip' mode: indices below -size clip to 0, indices above size - 1 clip to size - 1
    zeros = F.fill(dtype, shape, 0)
    clipped = F.select(out_of_lowerbounds, zeros, indices)
    clipped = F.select(out_of_upperbounds, upperbounds, clipped)
    return clipped
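
# Worked example (for reference): with size = 6, 'wrap' maps index 7 to
# 7 mod 6 = 1 and -1 to 5, while 'clip' maps 7 to 5 and -8 to 0; indices
# already inside [-6, 5] pass through unchanged.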


def take(a, indices, axis=None, mode='clip'):
    """
    Takes elements from an array along an axis.

    When axis is not None, this function does the same thing as "fancy" indexing
    (indexing arrays using arrays); however, it can be easier to use if you need
    elements along a given axis. A call such as ``np.take(arr, indices, axis=3)`` is
    equivalent to ``arr[:,:,:,indices,...]``.

    Note:
        Numpy argument out is not supported.
        ``mode = 'raise'`` is not supported, and the default mode is 'clip' instead.

    Args:
        a (Tensor): Source array with shape `(Ni…, M, Nk…)`.
        indices (Tensor): The indices with shape `(Nj...)` of the values to extract.
        axis (int, optional): The axis over which to select values. By default,
            the flattened input array is used.
        mode ('raise', 'wrap', 'clip', optional): Specifies how out-of-bounds
            indices will behave.
            'raise' – raise an error;
            'wrap' – wrap around;
            'clip' – clip to the range (default). 'clip' mode means that all indices
            that are too large are replaced by the index that addresses the last
            element along that axis. Note that this disables indexing with negative
            numbers.

    Returns:
        Tensor, the indexed result.

    Raises:
        ValueError: if axis is out of range.
        TypeError: if the input is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.array([4, 3, 5, 7, 6, 8])
        >>> indices = np.array([0, 1, 4])
        >>> output = np.take(a, indices)
        >>> print(output)
        [4 3 6]
        >>> indices = np.array([[0, 1], [2, 3]])
        >>> output = np.take(a, indices)
        >>> print(output)
        [[4 3]
         [5 7]]
    """
    _check_input_tensor(a, indices)
    if mode not in ('raise', 'wrap', 'clip'):
        _raise_value_error('`mode` should be one of "raise", "wrap", or "clip"')
    if axis is None:
        a = ravel(a)
        axis = 0
    ndim = F.rank(a)
    _check_axis_in_range(axis, ndim)
    axis = axis + ndim if axis < 0 else axis
    shape_a = F.shape(a)
    shape_indices = F.shape(indices)
    size_indices = indices.size
    indices = _check_indices(shape_a[axis], indices, mode)
    # reshapes indices to shape (Ni..., Nj..., Nk)
    shape_ni = _tuple_getitem(shape_a, axis, False)
    shape_nk = _tuple_getitem(shape_a, axis + 1)
    shape_out = shape_ni + shape_indices + shape_nk
    shape_indices = _expanded_shape(ndim, size_indices, axis)
    indices = F.reshape(indices, shape_indices)
    shape_indices = shape_ni + (indices.size,) + shape_nk
    indices = _broadcast_to_shape(indices, shape_indices)
    res = F.gather_d(a, axis, indices)
    return F.reshape(res, shape_out)
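
# Worked example (for reference): for `a` of shape (6,) flattened and
# `indices` of shape (2, 2), the gather runs on the flat axis and the
# result is reshaped to shape_out = () + (2, 2) + () = (2, 2), matching
# the second docstring example above.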


def repeat(a, repeats, axis=None):
    """
    Repeats elements of an array.

    Args:
        a (Tensor): Input array.
        repeats (int or sequence of ints): The number of repetitions for each element.
            `repeats` is broadcasted to fit the shape of the given axis.
        axis (int, optional): The axis along which to repeat values. By default,
            use the flattened input array, and return a flat output array.

    Returns:
        Tensor, output array which has the same shape as `a`, except along the given
        axis.

    Raises:
        ValueError: if axis is out of range.
        TypeError: if input `a` is not a Tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.repeat(np.array(3), 4)
        >>> print(output)
        [3 3 3 3]
        >>> x = np.array([[1,2],[3,4]])
        >>> output = np.repeat(x, 2)
        >>> print(output)
        [1 1 2 2 3 3 4 4]
        >>> output = np.repeat(x, 3, axis=1)
        >>> print(output)
        [[1 1 1 2 2 2]
         [3 3 3 4 4 4]]
        >>> output = np.repeat(x, [1, 2], axis=0)
        >>> print(output)
        [[1 2]
         [3 4]
         [3 4]]
    """
    _check_input_tensor(a)
    if not isinstance(repeats, (tuple, list)):
        repeats = (repeats,)
    _check_element_int(repeats)
    if axis is None:
        a = ravel(a)
        axis = 0
    ndim = F.rank(a)
    _check_axis_in_range(axis, ndim)
    axis = axis + ndim if axis < 0 else axis
    if len(repeats) == 1:
        repeats = repeats[0]
        if repeats == 0:
            return _empty(F.dtype(a), (0,))
        return C.repeat_elements(a, repeats, axis)
    shape = F.shape(a)
    size = shape[axis]
    if len(repeats) != size:
        _raise_value_error('operands could not be broadcast together')
    subs = split(a, size, axis)
    repeated_subs = []
    for sub, rep in zip(subs, repeats):
        if rep != 0:
            repeated_subs.append(C.repeat_elements(sub, rep, axis))
    return concatenate(repeated_subs, axis)
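
# Worked example (for reference): on the per-element path above,
# np.repeat(x, [1, 2], axis=0) with x of shape (2, 2) splits x into two
# (1, 2) rows, repeats them 1 and 2 times respectively, and concatenates
# to the (3, 2) result shown in the docstring.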


def rot90(a, k=1, axes=(0, 1)):
    """
    Rotates a tensor by 90 degrees in the plane specified by axes.
    Rotation direction is from the first towards the second axis.

    Args:
        a (Tensor): Input tensor of two or more dimensions.
        k (int): Number of times the tensor is rotated by 90 degrees. Default: 1.
        axes (Union[tuple(int), list(int)]): The tensor is rotated in the plane
            defined by the axes. Default: `(0, 1)`.
            Axes must be different and with the shape of `(2,)`.

    Returns:
        Tensor.

    Raises:
        TypeError: if input `a` is not a Tensor or
            the argument `k` is not integer or
            the argument `axes` is not tuple of ints or list of ints.
        ValueError: if any axis is out of range or
            the length of `axes` is not `2`.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(24).reshape((2, 3, 4))
        >>> output = np.rot90(a)
        >>> print(output)
        [[[ 8  9 10 11]
          [20 21 22 23]]
         [[ 4  5  6  7]
          [16 17 18 19]]
         [[ 0  1  2  3]
          [12 13 14 15]]]
        >>> output = np.rot90(a, 3, (1, 2))
        >>> print(output)
        [[[ 8  4  0]
          [ 9  5  1]
          [10  6  2]
          [11  7  3]]
         [[20 16 12]
          [21 17 13]
          [22 18 14]
          [23 19 15]]]
    """
    _check_input_tensor(a)
    if not isinstance(k, int):
        _raise_type_error("integer argument expected, but got ", k)
    # normalize k to [0, 4); Python's % is non-negative for a positive modulus
    k = k % 4
    if not isinstance(axes, (tuple, list)):
        _raise_type_error("tuple(ints) or list(ints) expected, but got ", axes)
    if len(axes) != 2:
        _raise_value_error("len(axes) must be 2.")
    axis1, axis2 = axes[0], axes[1]
    axis1 = _canonicalize_axis(axis1, a.ndim)
    axis2 = _canonicalize_axis(axis2, a.ndim)
    if axis1 == axis2:
        _raise_value_error('Axes must be different.')
    if k == 0:
        return a
    if k == 2:
        return flip(flip(a, axis1), axis2)
    perm = _list_comprehensions(a.ndim)
    perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
    if k == 1:
        return flip(transpose(a, perm), axis1)
    return flip(transpose(a, perm), axis2)
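
# Worked example (for reference): for a 2x2 tensor [[1, 2], [3, 4]] and the
# default axes (0, 1), k = 1 takes the transpose [[1, 3], [2, 4]] and flips
# axis 0, giving [[2, 4], [1, 3]], the same result as ``np.rot90``.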


def select(condlist, choicelist, default=0):
    """
    Returns an array drawn from elements in `choicelist`, depending on conditions.

    Args:
        condlist (array_like): The list of conditions which determine from which array
            in `choicelist` the output elements are taken. When multiple conditions are
            satisfied, the first one encountered in `condlist` is used.
        choicelist (array_like): The list of arrays from which the output elements are
            taken. It has to be of the same length as `condlist`.
        default (scalar, optional): The element inserted in output when all conditions
            evaluate to `False`. Default: 0.

    Returns:
        Tensor, the output at position `m` is the `m-th` element of the array in
        `choicelist` where the `m-th` element of the corresponding array in `condlist`
        is `True`.

    Raises:
        ValueError: if ``len(condlist) != len(choicelist)``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> condlist = [[True, True, True, False, False], [False, False, True, False, True]]
        >>> choicelist = [[0, 1, 2, 3, 4], [0, 1, 4, 9, 16]]
        >>> output = np.select(condlist, choicelist)
        >>> print(output)
        [ 0  1  2  0 16]
    """
    condlist, choicelist = _to_tensor(condlist, choicelist)
    shape_cond = F.shape(condlist)
    shape_choice = F.shape(choicelist)
    if F.rank(condlist) == 0 or F.rank(choicelist) == 0:
        _raise_value_error('input cannot be scalars')
    case_num = shape_cond[0]
    if shape_choice[0] != case_num:
        _raise_value_error('list of cases must be same length as list of conditions')
    # performs broadcast over the cases in condlist and choicelist
    case_size = _infer_out_shape(shape_cond[1:], shape_choice[1:])
    shape_broadcasted = (case_num,) + case_size
    ndim = len(shape_broadcasted)
    shape_cond_expanded = ((case_num,) + _list_comprehensions(ndim - F.rank(condlist), 1, True) +
                           shape_cond[1:])
    condlist = _broadcast_to_shape(F.reshape(condlist, shape_cond_expanded), shape_broadcasted)
    shape_choice_expanded = ((case_num,) + _list_comprehensions(ndim - F.rank(choicelist), 1, True) +
                             shape_choice[1:])
    choicelist = _broadcast_to_shape(F.reshape(choicelist, shape_choice_expanded), shape_broadcasted)
    slice_start = _list_comprehensions(ndim - 1, 0, True)
    slice_size = (1,) + case_size
    dtype = F.dtype(choicelist)
    if _get_device() == 'CPU' and not _check_is_float(dtype):
        # F.tensor_slice only supports float on CPU
        choicelist = F.cast(choicelist, mstype.float32)
    default_slice = F.fill(F.dtype(choicelist), slice_size, default)
    # iterate cases from last to first so that earlier conditions take precedence
    for i in range(case_num - 1, -1, -1):
        cond_slice = F.tensor_slice(condlist.astype(mstype.float32), (i,) + slice_start, slice_size)
        choice_slice = F.tensor_slice(choicelist, (i,) + slice_start, slice_size)
        default_slice = F.select(cond_slice.astype(mstype.bool_), choice_slice, default_slice)
    return F.reshape(default_slice, case_size).astype(dtype)
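
# Worked example (for reference): in the docstring example, the loop starts
# from the default slice, overwrites it with case 1 wherever condlist[1] is
# True, then with case 0 wherever condlist[0] is True, so position 2 ends up
# taking its value 2 from the first (highest-priority) condition.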