You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

array_ops.py 32 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """array operations, the function docs are adapted from Numpy API."""
  16. from ..common import dtype as mstype
  17. from ..ops import operations as P
  18. from ..ops import functional as F
  19. from ..ops.primitive import constexpr
  20. from ..nn import Cell
  21. from .utils import _convert_list_tensor_to_tuple_tensor, _expand, _broadcast_to, \
  22. _is_empty
  23. from .utils_const import _check_is_int, _check_axes_range, _check_start_normalize, \
  24. _check_is_tensor, _check_is_tuple, _check_is_list, _raise_type_error, _raise_value_error, \
  25. _infer_out_shape, _empty, _promote, _check_same_type, _check_input_tensor
# Upper bound on array rank: according to the official NumPy reference, the
# dimension (number of axes) of a NumPy array must be less than 32.
MAX_NUMPY_DIMS = 32
  29. @constexpr
  30. def _prepare_shape_for_expand_dims(shape, axes):
  31. """
  32. Creates the expanded new shape based on the shape and given axes
  33. Args:
  34. shape (tuple): the shape of the tensor
  35. axes Union(int, tuple(int), list(int)): the axes with dimensions expanded.
  36. Returns:
  37. new_shape(tuple): the shape with dimensions expanded.
  38. """
  39. new_shape = []
  40. shape_idx = 0
  41. new_shape_length = len(shape)
  42. # Convert to set
  43. if isinstance(axes, int):
  44. new_shape_length += 1
  45. if axes >= new_shape_length or axes < -new_shape_length:
  46. raise ValueError(f"axis {axes} is out of bounds for tensor of dimension {new_shape_length}")
  47. axes = {axes}
  48. elif isinstance(axes, (list, tuple)):
  49. new_shape_length += len(axes)
  50. for axis in axes:
  51. if axis >= new_shape_length or axis < -new_shape_length:
  52. raise ValueError(f"axis {axis} is out of bounds for tensor of dimension {new_shape_length}")
  53. axes = set(axes)
  54. else:
  55. raise TypeError(f"only int, tuple and list are allowed for axes, but got {type(axes)}")
  56. for new_shape_idx in range(new_shape_length):
  57. if new_shape_idx in axes or new_shape_idx - new_shape_length in axes:
  58. new_shape.append(1)
  59. else:
  60. new_shape.append(shape[shape_idx])
  61. shape_idx += 1
  62. return tuple(new_shape)
  63. def expand_dims(a, axis):
  64. """
  65. Expands the shape of a tensor.
  66. Inserts a new axis that will appear at the axis position in the expanded tensor shape.
  67. Args:
  68. a (Tensor): Input tensor array.
  69. axis Union[int, list(int), tuple(int)]: Position in the expanded axes where
  70. the new axis is placed,
  71. Returns:
  72. Tensor, view of a tensor with the number of dimensions increased.
  73. Raises:
  74. TypeError: If input arguments have types not specified above.
  75. ValueError: If axis exceeds a.ndim.
  76. Supported Platforms:
  77. ``Ascend`` ``GPU`` ``CPU``
  78. Examples:
  79. >>> import mindspore.numpy as np
  80. >>> x = np.ones((2,2))
  81. >>> x = np.expand_dims(x,0)
  82. >>> print(x.shape)
  83. (1, 2, 2)
  84. """
  85. if not _check_is_tensor(F.typeof(a)):
  86. _raise_type_error("Input is not Tensor.")
  87. shape = F.shape(a)
  88. # yield expanded shape based on the axes
  89. new_shape = _prepare_shape_for_expand_dims(shape, axis)
  90. return F.reshape(a, new_shape)
  91. def squeeze(a, axis=None):
  92. """
  93. Removes single-dimensional entries from the shape of an tensor.
  94. Args:
  95. a (Tensor): Input tensor array.
  96. axis: Union[None, int, list(int), tuple(list)]. Default is None.
  97. Returns:
  98. Tensor, with all or a subset of the dimensions of length 1 removed.
  99. Raises:
  100. TypeError: If input arguments have types not specified above.
  101. ValueError: If specified axis has shape entry > 1.
  102. Supported Platforms:
  103. ``Ascend`` ``GPU`` ``CPU``
  104. Examples:
  105. >>> import mindspore.numpy as np
  106. >>> x = np.ones((1,2,2,1))
  107. >>> x = np.squeeze(x)
  108. >>> print(x.shape)
  109. (2, 2)
  110. """
  111. if not _check_is_tensor(F.typeof(a)):
  112. _raise_type_error("Input is not Tensor.")
  113. return a.squeeze(axis)
  114. def transpose(a, axes=None):
  115. """
  116. Reverses or permutes the axes of a tensor; returns the modified tensor.
  117. Args:
  118. a (Tensor): a tensor to be transposed
  119. axes (Union[None, tuple, list]): the axes order, if axes is None, transpose
  120. the entire tensor. Default is None.
  121. Returns:
  122. Tensor, the transposed tensor array.
  123. Raises:
  124. TypeError: If input arguments have types not specified above.
  125. ValueError: If the number of axes is not euqal to a.ndim.
  126. Supported Platforms:
  127. ``Ascend`` ``GPU`` ``CPU``
  128. Examples:
  129. >>> import mindspore.numpy as np
  130. >>> x = np.ones((1,2,3))
  131. >>> x = np.transpose(x)
  132. >>> print(x.shape)
  133. (3, 2, 1)
  134. """
  135. if not _check_is_tensor(F.typeof(a)):
  136. _raise_type_error("Input is not Tensor.")
  137. return a.transpose(axes)
def rollaxis(x, axis, start=0):
    """
    Rolls the specified axis backwards, until it lies in the given position.

    The positions of the other axes do not change relative to one another.

    Args:
        x (Tensor): A Tensor to be transposed.
        axis (int): The axis to be rolled.
        start (int):
            - When start >= 0:
                - When start <= axis: the axis is rolled back until it lies in
                  this position (start).
                - When start > axis: the axis is rolled until it lies before this
                  position (start).
            - When start < 0: the start will be normalized as follows:
                start ........... Normalized start
                -(x.ndim+1)       raise ValueError
                -x.ndim           0
                ...               ...
                -1                x.ndim-1
                0                 0
                ...               ...
                x.ndim            x.ndim
                x.ndim+1          raise ValueError

    Returns:
        Transposed Tensor. Has the same data type as the original tensor x.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If axis or start is not integer, or x is not tensor.
        ValueError: If axis is not in the range from -ndim to ndim-1 or
            start is not in the range from -ndim to ndim.

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,3,4))
        >>> output = np.rollaxis(x, 0, 2)
        >>> print(output.shape)
        (3, 2, 4)
    """
    # Validate argument types before any shape work.
    if not _check_is_tensor(F.typeof(x)):
        _raise_type_error("Input is not Tensor.")
    if not _check_is_int(axis):
        _raise_type_error("integer argument expected, but got ", axis)
    if not _check_is_int(start):
        _raise_type_error("integer argument expected, but got ", start)
    shape = F.shape(x)
    ndim = F.tuple_len(shape)
    # Normalize axis to [0, ndim) and start to [0, ndim]; both raise on
    # out-of-range values.
    axis = _check_axes_range(axis, ndim)
    start = _check_start_normalize(start, ndim)
    # start == axis or start == axis + 1 leaves the axis where it already is.
    if start - axis >= 0 and start - axis <= 1:
        return x
    perm = F.make_range(0, ndim)
    new_perm = None
    if start < axis:
        # Move `axis` backwards to position `start`.
        # NOTE(review): the axis+1 == ndim special case below looks redundant
        # (perm[axis+1:] would be empty) — presumably a graph-mode workaround
        # for empty tuple slices; confirm before simplifying.
        if axis + 1 < ndim:
            new_perm = perm[0:start] + perm[axis:axis+1] + \
                perm[start:axis] + perm[axis+1:]
        else:
            new_perm = perm[0:start] + perm[axis:axis+1] + perm[start:axis]
    if start > axis:
        # Move `axis` forwards so that it lies just before position `start`.
        if start < ndim:
            new_perm = perm[0:axis] + perm[axis+1:start] + \
                perm[axis:axis+1] + perm[start:]
        else:
            new_perm = perm[0:axis] + perm[axis+1:start] + \
                perm[axis:axis+1]
    return F.transpose(x, new_perm)
  204. def swapaxes(x, axis1, axis2):
  205. """
  206. Interchanges two axes of a tensor.
  207. Args:
  208. x (Tensor): A tensor to be transposed.
  209. axis1 (int): First axis.
  210. axis2 (int): Second axis.
  211. Returns:
  212. Transposed tensor, has the same data type as the original tensor x.
  213. Raises:
  214. TypeError: If axis1 or axis2 is not integer, or x is not tensor.
  215. ValueError: If axis1 or axis2 is not in the range from -ndim to ndim-1.
  216. Supported Platforms:
  217. ``Ascend`` ``GPU`` ``CPU``
  218. Examples:
  219. >>> import mindspore.numpy as np
  220. >>> x = np.ones((2,3,4))
  221. >>> output = np.swapaxes(x, 0, 2)
  222. >>> print(output.shape)
  223. (4,3,2)
  224. """
  225. if not _check_is_tensor(F.typeof(x)):
  226. _raise_type_error("Input is not Tensor.")
  227. return x.swapaxes(axis1, axis2)
  228. def reshape(x, new_shape):
  229. """
  230. Reshapes a tensor without changing its data.
  231. Args:
  232. x (Tensor): A tensor to be reshaped.
  233. new_shape (Union[int, list(int), tuple(int)]): The new shape should be
  234. compatible with the original shape. If the tuple has only one element,
  235. the result will be a 1-D tensor of that length. One shape dimension
  236. can be -1. In this case, the value is inferred from the length of
  237. the tensor and remaining dimensions.
  238. Returns:
  239. Reshaped Tensor. Has the same data type as the original tensor x.
  240. Raises:
  241. TypeError: If new_shape is not integer, list or tuple, or x is not tensor.
  242. ValueError: If new_shape does not compatible with the original shape.
  243. Supported Platforms:
  244. ``Ascend`` ``GPU`` ``CPU``
  245. Examples:
  246. >>> import mindspore.numpy as np
  247. >>> x = np.asarray([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]])
  248. >>> output = np.reshape(x, (3, 2))
  249. >>> print(output)
  250. [[-0.1 0.3]
  251. [ 3.6 0.4]
  252. [ 0.5 -3.2]]
  253. >>> output = np.reshape(x, (3, -1))
  254. >>> print(output)
  255. [[-0.1 0.3]
  256. [ 3.6 0.4]
  257. [ 0.5 -3.2]]
  258. >>> output = np.reshape(x, (6, ))
  259. >>> print(output)
  260. [-0.1 0.3 3.6 0.4 0.5 -3.2]
  261. """
  262. if not _check_is_tensor(F.typeof(x)):
  263. _raise_type_error("Input is not Tensor.")
  264. return x.reshape(new_shape)
  265. def ravel(x):
  266. """
  267. Returns a contiguous flattened tensor.
  268. A 1-D tensor, containing the elements of the input, is returned.
  269. Args:
  270. x (Tensor): A tensor to be flattened.
  271. Returns:
  272. Flattened tensor, has the same data type as the original tensor x.
  273. Raises:
  274. TypeError: If x is not tensor.
  275. Supported Platforms:
  276. ``Ascend`` ``GPU`` ``CPU``
  277. Examples:
  278. >>> import mindspore.numpy as np
  279. >>> x = np.ones((2,3,4))
  280. >>> output = np.ravel(x)
  281. >>> print(output.shape)
  282. (24,)
  283. """
  284. if not _check_is_tensor(F.typeof(x)):
  285. _raise_type_error("Input is not Tensor.")
  286. return x.ravel()
  287. @constexpr
  288. def _move_axes_for_concatenate(arr_shape, axis):
  289. """
  290. Moves axis 0 to the desiganated position, while keeps other axes' relative
  291. positions unchanged, only used if a single tensor is concatenated.
  292. """
  293. original_axes = tuple(range(len(arr_shape)))
  294. new_axes = original_axes[1:axis+1] + (0,) + original_axes[axis+1:]
  295. new_shape = arr_shape[1:axis+1] + (arr_shape[0] * arr_shape[axis+1],) + \
  296. arr_shape[axis+2:]
  297. return new_axes, new_shape
  298. def _promote_type_for_concatenate(tuple_of_tensors):
  299. """
  300. Checks dtype for all tensors in the tuple. If dtypes are not the same, promote
  301. them to the `highest` dtype in the tuple, so that they are ready for the concat
  302. operator.
  303. Args:
  304. tuple_of_tensors(tuple(tensor)): A tuple of tensors
  305. Returns:
  306. tuple of tensors, with each tensor promoted to ths same dtype.
  307. """
  308. need_cast = False
  309. final_type = tuple_of_tensors[0].dtype
  310. for tensor in tuple_of_tensors:
  311. if not _check_same_type(final_type, tensor.dtype):
  312. need_cast = True
  313. final_type = _promote(final_type, tensor.dtype)
  314. if not need_cast:
  315. return tuple_of_tensors
  316. tuple_of_casted_tensors = ()
  317. for tensor in tuple_of_tensors:
  318. tuple_of_casted_tensors += (tensor.astype(final_type, copy=False),)
  319. return tuple_of_casted_tensors
def concatenate(arrays, axis=0):
    """
    Joins a sequence of tensors along an existing axis.

    Args:
        arrays (Union[Tensor, tuple(Tensor), list(Tensor)]): A tensor or a list
            of tensors to be concatenated.
        axis (int, optional): The axis along which the tensors will be joined,
            if axis is None, tensors are flattened before use. Default is 0.

    Returns:
        Tensor, a tensor concatenated from a tensor or a list of tensors.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If specified axis < 0, and exceeds tensor.ndim.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x1 = np.ones((1,2,3))
        >>> x2 = np.ones((1,2,1))
        >>> x = np.concatenate((x1, x2), axis=-1)
        >>> print(x.shape)
        (1, 2, 4)
    """
    array_type = F.typeof(arrays)
    if _check_is_tensor(array_type):
        # if the input is a single tensor
        # if only one tensor is provided, it is treated as a tuple along the
        # first dimension. For example, a tensor of shape (3,4,5) will be treated
        # as: tuple(tensor_1(4,5), tensor_2(4,5), tensor_3(4,5))
        # NOTE(review): axis >= MAX_NUMPY_DIMS is treated like axis is None
        # (flatten) — presumably an internal sentinel convention; confirm.
        if axis is None or axis >= MAX_NUMPY_DIMS:
            return ravel(arrays)
        arr_shape = F.shape(arrays)
        _check_axes_range((axis,), len(arr_shape))
        # move axis 0 to the designated position, while keeping other axes'
        # relative positions unchanged
        new_axes, new_shape = _move_axes_for_concatenate(arr_shape, axis)
        arrays = transpose(arrays, new_axes)
        arrays = reshape(arrays, new_shape)
        return arrays
    flattened_arrays = ()
    # axis=None (or the >= MAX_NUMPY_DIMS sentinel): flatten every input and
    # concatenate along the single remaining axis.
    if axis is None or axis >= MAX_NUMPY_DIMS:
        for arr in arrays:
            flattened_arrays += (ravel(arr),)
        axis = -1
        flattened_arrays = _promote_type_for_concatenate(flattened_arrays)
        return P.Concat(axis)(flattened_arrays)
    # convert a list of tensor to a tuple of tensor
    arrays = _convert_list_tensor_to_tuple_tensor(arrays)
    arr_shape = F.shape(arrays[0])
    _check_axes_range((axis,), len(arr_shape))
    # if only one tensor in the tuple/list, return the tensor itself
    if len(arrays) == 1:
        return arrays[0]
    # Cast all inputs to a common promoted dtype before Concat.
    arrays = _promote_type_for_concatenate(arrays)
    return P.Concat(axis)(arrays)
  375. def column_stack(tup):
  376. """
  377. Stacks 1-D tensors as columns into a 2-D tensor. 2-D tensors are stacked as-is,
  378. like np.hstack.
  379. Args:
  380. tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. All
  381. of them must have the same shape except the axis to be concatenated.
  382. Returns:
  383. 2-D Tensor, formed by stacking the given tensors.
  384. Supported Platforms:
  385. ``Ascend`` ``GPU`` ``CPU``
  386. Raises:
  387. TypeError: If tup is not Tensor, list or tuple.
  388. ValueError: If tup is empty.
  389. Examples:
  390. >>> import mindspore.numpy as mnp
  391. >>> import numpy as onp
  392. >>> from mindspore import Tensor
  393. >>> x1 = Tensor(onp.array([1, 2, 3]).astype('int32'))
  394. >>> x2 = Tensor(onp.array([4, 5, 6]).astype('int32'))
  395. >>> output = mnp.column_stack((x1, x2))
  396. >>> print(output)
  397. [[1, 4],
  398. [2, 5],
  399. [3, 6]]
  400. """
  401. if _check_is_tensor(F.typeof(tup)):
  402. return tup
  403. if not _check_is_list(tup) and not _check_is_tuple(tup):
  404. _raise_type_error("Tensor or, list or tuple of tensors are required, but got ", tup)
  405. trans_tup = ()
  406. for tensor in tup:
  407. if tensor.ndim < 1:
  408. tensor = F.expand_dims(tensor, 0)
  409. if tensor.ndim == 1:
  410. tensor = F.expand_dims(tensor, 1)
  411. trans_tup += (tensor,)
  412. if not trans_tup:
  413. _raise_value_error("Need at least one tensor to concatenate.")
  414. return P.Concat(1)(trans_tup)
  415. def vstack(tup):
  416. """
  417. Stacks tensors in sequence vertically.
  418. This is equivalent to concatenation along the first axis. 1-D tensors should firstly be reshaped to (1, N),
  419. and then be concatenated along the first axis.
  420. Args:
  421. tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The tensors must have the same shape
  422. along all but the first axis. 1-D tensors must have the same shape.
  423. Returns:
  424. Stacked Tensor, formed by stacking the given tensors.
  425. Supported Platforms:
  426. ``Ascend`` ``GPU`` ``CPU``
  427. Raises:
  428. TypeError: If tup is not Tensor, list or tuple.
  429. ValueError: If tup is empty.
  430. Examples:
  431. >>> import mindspore.numpy as mnp
  432. >>> import numpy as onp
  433. >>> from mindspore import Tensor
  434. >>> x1 = Tensor(onp.array([1, 2, 3]).astype('int32'))
  435. >>> x2 = Tensor(onp.array([4, 5, 6]).astype('int32'))
  436. >>> output = mnp.vstack((x1, x2))
  437. >>> print(output)
  438. [[1, 2, 3],
  439. [4, 5, 6]]
  440. """
  441. if _check_is_tensor(F.typeof(tup)):
  442. return tup
  443. if not _check_is_list(tup) and not _check_is_tuple(tup):
  444. _raise_type_error("Tensor or, list or tuple of tensors are required, but got", tup)
  445. trans_tup = ()
  446. for tensor in tup:
  447. if tensor.ndim <= 1:
  448. tensor = _expand(tensor, 2, 0)
  449. trans_tup += (tensor,)
  450. if not trans_tup:
  451. _raise_value_error("Need at least one tensor to concatenate.")
  452. return P.Concat(0)(trans_tup)
  453. def hstack(tup):
  454. """
  455. Stacks tensors in sequence horizontally.
  456. This is equivalent to concatenation along the second axis, except for 1-D tensors
  457. where it concatenates along the first axis.
  458. Args:
  459. tup (Union[Tensor, tuple, list]): A sequence of 1-D or 2-D tensors. The
  460. tensors must have the same shape along all but the second axis, except
  461. 1-D tensors which can be any length.
  462. Returns:
  463. Stacked Tensor, formed by stacking the given tensors.
  464. Supported Platforms:
  465. ``Ascend`` ``GPU`` ``CPU``
  466. Raises:
  467. TypeError: If tup is not Tensor, list or tuple.
  468. ValueError: If tup is empty.
  469. Examples:
  470. >>> import mindspore.numpy as mnp
  471. >>> import numpy as onp
  472. >>> from mindspore import Tensor
  473. >>> x1 = Tensor(onp.array([1, 2, 3]).astype('int32'))
  474. >>> x2 = Tensor(onp.array([4, 5, 6]).astype('int32'))
  475. >>> output = mnp.hstack((x1, x2))
  476. >>> print(output)
  477. [1, 2, 3, 4, 5, 6]
  478. """
  479. if _check_is_tensor(F.typeof(tup)):
  480. return tup
  481. if not _check_is_list(tup) and not _check_is_tuple(tup):
  482. _raise_type_error("Tensor or, list or tuple of tensors are required, but got", tup)
  483. tuple_of_tensor = ()
  484. for tensor in tup:
  485. if tensor.ndim < 1:
  486. tensor = F.expand_dims(tensor, 0)
  487. tuple_of_tensor += (tensor,)
  488. if not tuple_of_tensor:
  489. _raise_value_error("Need at least one tensor to concatenate.")
  490. if tuple_of_tensor[0].ndim <= 1:
  491. return P.Concat(0)(tuple_of_tensor)
  492. return P.Concat(1)(tuple_of_tensor)
  493. def dstack(tup):
  494. """
  495. Stacks tensors in sequence depth wise (along the third axis).
  496. This is equivalent to concatenation along the third axis. 1-D tensors (N,) should be reshaped to (1,N,1).
  497. 2-D tensors (M,N) should be reshaped to (M,N,1) before concatenation.
  498. Args:
  499. tup (Union[Tensor, tuple, list]): A sequence of tensors. The tensors must have the same shape along all but
  500. the third axis. 1-D or 2-D tensors must have the same shape.
  501. Returns:
  502. Stacked Tensor, formed by stacking the given tensors.
  503. Supported Platforms:
  504. ``Ascend`` ``GPU`` ``CPU``
  505. Raises:
  506. TypeError: If tup is not Tensor, list or tuple.
  507. ValueError: If tup is empty.
  508. Examples:
  509. >>> import mindspore.numpy as mnp
  510. >>> import numpy as onp
  511. >>> from mindspore import Tensor
  512. >>> x1 = Tensor(onp.array([1, 2, 3]).astype('int32'))
  513. >>> x2 = Tensor(onp.array([4, 5, 6]).astype('int32'))
  514. >>> output = mnp.dstack((x1, x2))
  515. >>> print(output)
  516. [[[1, 4],
  517. [2, 5],
  518. [3, 6]]]
  519. """
  520. if _check_is_tensor(F.typeof(tup)):
  521. return tup
  522. if not _check_is_list(tup) and not _check_is_tuple(tup):
  523. _raise_type_error("Tensor or, list or tuple of tensors are required, but got", tup)
  524. trans_tup = ()
  525. for tensor in tup:
  526. if tensor.ndim <= 1:
  527. tensor = _expand(tensor, 2, 0)
  528. if tensor.ndim == 2:
  529. tensor = F.expand_dims(tensor, 2)
  530. trans_tup += (tensor,)
  531. if not trans_tup:
  532. _raise_value_error("Need at least one tensor to concatenate.")
  533. return P.Concat(2)(trans_tup)
  534. def where(condition, x=None, y=None):
  535. """
  536. Returns elements chosen from x or y depending on condition.
  537. Note:
  538. As nonzero is not supported, neither x or y can be None.
  539. Args:
  540. condition (Tensor): where True, yield x, otherwise yield y.
  541. x, y (Tensor): Values from which to choose. x, y and condition need
  542. to be broadcastable to some shape.
  543. Returns:
  544. Tensor or scalar, with elements from x where condition is True, and
  545. elements from y elsewhere.
  546. Raises:
  547. ValueError: if operands cannot be broadcast.
  548. Supported Platforms:
  549. ``Ascend`` ``GPU`` ``CPU``
  550. Examples:
  551. >>> import mindspore.numpy as np
  552. >>> condition = np.full((1, 1, 2), [False, True])
  553. >>> x = np.full((1, 3, 2), 5)
  554. >>> y = np.full((2, 1, 1), 7)
  555. >>> output = np.where(condition, x, y)
  556. >>> print(output)
  557. [[[7, 5],
  558. [7, 5],
  559. [7, 5]],
  560. [[7, 5],
  561. [7, 5],
  562. [7, 5]]]
  563. """
  564. # type promotes input tensors
  565. dtype1 = F.dtype(x)
  566. dtype2 = F.dtype(y)
  567. dtype = _promote(dtype1, dtype2)
  568. if not _check_same_type(dtype1, dtype):
  569. x = F.cast(x, dtype)
  570. if not _check_same_type(dtype2, dtype):
  571. y = F.cast(y, dtype)
  572. is_bool = _check_same_type(dtype1, mstype.bool_) and _check_same_type(
  573. dtype2, mstype.bool_)
  574. if is_bool:
  575. # select does not support bool type for x or y
  576. x = F.cast(x, mstype.float32)
  577. y = F.cast(y, mstype.float32)
  578. # broadcasts input tensors
  579. shape_out = _infer_out_shape(F.shape(condition),
  580. F.shape(x), F.shape(y))
  581. ndim_out = len(shape_out)
  582. if not _check_same_type(F.dtype(condition), mstype.float32):
  583. # tiling with bool is not supported on GPU
  584. condition = F.cast(condition, mstype.float32)
  585. condition = _expand(condition, ndim_out)
  586. x = _expand(x, ndim_out)
  587. y = _expand(y, ndim_out)
  588. condition = _broadcast_to(
  589. condition, F.shape(condition), shape_out, ndim_out)
  590. x = _broadcast_to(x, F.shape(x), shape_out, ndim_out)
  591. y = _broadcast_to(y, F.shape(y), shape_out, ndim_out)
  592. if not _check_same_type(F.dtype(condition), mstype.bool_):
  593. condition = F.cast(condition, mstype.bool_)
  594. res = F.select(condition, x, y)
  595. if is_bool:
  596. res = F.cast(res, mstype.bool_)
  597. return res
  598. def _atleast_xd(ndim, arys):
  599. """Returns arys with at least ndim."""
  600. for arr in arys:
  601. _check_input_tensor(F.typeof(arr))
  602. res = []
  603. for arr in arys:
  604. arr = _expand(arr, ndim)
  605. res.append(arr)
  606. if len(res) == 1:
  607. return res[0]
  608. return res
def atleast_1d(*arys):
    """
    Converts inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Note:
        In graph mode, returns a tuple of tensor instead of a list of
        tensors.

    Args:
        arys1, arys2, … (Tensor): one or more input tensors.

    Returns:
        Tensor, or list of tensors, each with a.ndim >= 1.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_1d(a, b, c)
        >>> print(output)
        (Tensor(shape=[2, 3], dtype=Float32, value=
        [[1.00000000e+000, 1.00000000e+000, 1.00000000e+000],
        [1.00000000e+000, 1.00000000e+000, 1.00000000e+000]]),
        Tensor(shape=[1], dtype=Float32, value= [1.00000000e+000]),
        Tensor(shape=[5], dtype=Float32,
        value= [1.00000000e+000, 1.00000000e+000, 1.00000000e+000,
        1.00000000e+000, 1.00000000e+000]))
    """
    # Shared helper handles validation, expansion, and single-input unwrap.
    return _atleast_xd(1, arys)
def atleast_2d(*arys):
    """
    Views inputs as arrays with at least two dimensions.

    Note:
        In graph mode, returns a tuple of tensor instead of a list of
        tensors.

    Args:
        arys1, arys2, … (Tensor): one or more input tensors.

    Returns:
        Tensor, or list of tensors, each with a.ndim >= 2.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((2, 3))
        >>> b = np.ones(())
        >>> c = np.ones(5)
        >>> output = np.atleast_2d(a, b, c)
        >>> print(output)
        (Tensor(shape=[2, 3], dtype=Float32, value=
        [[1.00000000e+000, 1.00000000e+000, 1.00000000e+000],
        [1.00000000e+000, 1.00000000e+000, 1.00000000e+000]]),
        Tensor(shape=[1, 1], dtype=Float32, value= [[1.00000000e+000]]),
        Tensor(shape=[1, 5], dtype=Float32,
        value= [[1.00000000e+000, 1.00000000e+000, 1.00000000e+000,
        1.00000000e+000, 1.00000000e+000]]))
    """
    # Shared helper handles validation, expansion, and single-input unwrap.
    return _atleast_xd(2, arys)
  669. def atleast_3d(*arys):
  670. """
  671. Views inputs as arrays with at least three dimensions.
  672. Note:
  673. In graph mode, returns a tuple of tensor instead of a list of
  674. tensors.
  675. Args:
  676. arys1, arys2, … (Tensor): one or more input tensors.
  677. Returns:
  678. Tensor, or list of tensors, each with a.ndim >= 3. For example,
  679. a 1-D array of shape (N,) becomes a view of shape (1, N, 1), and
  680. a 2-D array of shape (M, N) becomes a view of shape (M, N, 1).
  681. Raises:
  682. TypeError: if the input is not a tensor.
  683. Supported Platforms:
  684. ``Ascend`` ``GPU`` ``CPU``
  685. Examples:
  686. >>> a = np.ones((2, 3))
  687. >>> b = np.ones(())
  688. >>> c = np.ones(5)
  689. >>> output = np.atleast_3d(a, b, c)
  690. >>> print(output)
  691. (Tensor(shape=[2, 3, 1], dtype=Float32, value=
  692. [[[1.00000000e+000], [1.00000000e+000], [1.00000000e+000]],
  693. [[1.00000000e+000], [1.00000000e+000], [1.00000000e+000]]]),
  694. Tensor(shape=[1, 1, 1], dtype=Float32, value= [[[1.00000000e+000]]]),
  695. Tensor(shape=[1, 5, 1], dtype=Float32,
  696. value= [[[1.00000000e+000], [1.00000000e+000], [1.00000000e+000],
  697. [1.00000000e+000], [1.00000000e+000]]]))
  698. """
  699. res = []
  700. for arr in arys:
  701. ndim = F.rank(arr)
  702. if ndim == 0:
  703. arr = F.reshape(arr, (1, 1, 1))
  704. elif ndim == 1:
  705. arr = F.reshape(arr, (1, F.size(arr), 1))
  706. elif ndim == 2:
  707. arr = F.reshape(arr, F.shape(arr) + (1,))
  708. res.append(arr)
  709. if len(res) == 1:
  710. return res[0]
  711. return res
def stack(arrays, axis=0):
    """
    Joins a sequence of arrays along a new axis.

    The axis parameter specifies the index of the new axis in the
    dimensions of the result. For example, if axis=0 it will be the
    first dimension and if axis=-1 it will be the last dimension.

    Note:
        Numpy argument out is not supported.

    Args:
        arrays (sequence of Tensor): Each array must have the same shape.
        axis (int): optional. The axis in the result array along which the
            input arrays are stacked.

    Returns:
        Tensor, The stacked array has one more dimension than the input
        arrays.

    Raises:
        ValueError: if input is not Tensor, tuple, or list.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> arrays = [np.ones((3, 4)) for _ in range(10)]
        >>> output = np.stack(arrays, axis=0)
        >>> print(output.shape)
        (10, 3, 4)
        >>> output = np.stack(arrays, axis=1)
        >>> print(output.shape)
        (3, 10, 4)
        >>> output = np.stack(arrays, axis=2)
        >>> print(output.shape)
        (3, 4, 10)
    """
    arr_type = F.typeof(arrays)
    if _check_is_tensor(arr_type):
        # A single tensor is treated as a sequence along its first dimension:
        # stacking amounts to moving axis 0 to position `axis` via transpose.
        shape = F.shape(arrays)
        ndim = F.rank(arrays)
        axis = axis % ndim
        axes = F.make_range(ndim)
        perm = axes[1:axis+1] + (0,) + axes[axis+1:]
        if _is_empty(shape):
            # NOTE(review): _empty hardcodes mstype.float32 here, so stacking
            # an empty non-float32 input appears to change dtype — confirm
            # whether this is intentional.
            return _empty(mstype.float32, shape[1:axis+1] + (shape[0],) + shape[axis+1:])
        return transpose(arrays, perm)
    if _check_is_tuple(arr_type) or _check_is_list(arr_type):
        # Result shape: sequence length prepended to the element shape.
        shape = (len(arrays),) + F.shape(arrays[0])
        ndim = len(shape)
        axis = axis % ndim
        if _is_empty(shape):
            # NOTE(review): same hardcoded float32 dtype as above — confirm.
            return _empty(mstype.float32, shape[1:axis+1] + (shape[0],) + shape[axis+1:])
        # Give every element the new axis, then concatenate along it.
        seq = ()
        for arr in arrays:
            seq += (F.expand_dims(arr, axis),)
        return concatenate(seq, axis)
    return _raise_value_error('input arrays must be Tensor, tuple, or list')
  764. class UniqueNet(Cell):
  765. """The operation `mindspore.ops.Unique` must be wrapped inside a model and executed in graph mode. """
  766. def __init__(self):
  767. super(UniqueNet, self).__init__()
  768. self.unique = P.Unique()
  769. def construct(self, x):
  770. return self.unique(x)
  771. def unique(x, return_inverse=False):
  772. """
  773. Finds the unique elements of a tensor. The input tensor will be flattened first
  774. when it has more than one dimension.
  775. Note:
  776. Numpy arguments `axis`, `return_index` and `return_counts` are not supported.
  777. This operator must be executed in graph mode.
  778. Args:
  779. x (Tensor): The input tensor to be processed.
  780. return_inverse (bool): If True, also return the indices of the unique tensor.
  781. Default: False.
  782. Returns:
  783. Tensor or tuple of Tensors.
  784. - If `return_inverse` is False, just return the unique tensor.
  785. - If `return_inverse` is True, return tuple of tensors.
  786. Supported Platforms:
  787. ``Ascend`` ``GPU`` ``CPU``
  788. Raises:
  789. TypeError: If x is not tensor.
  790. Examples:
  791. >>> import mindspore.numpy as mnp
  792. >>> import numpy as onp
  793. >>> from mindspore import context
  794. >>> context.set_context(mode=context.GRAPH_MODE)
  795. >>> input_x = mnp.asarray(onp.array([1, 2, 2, 2, 3, 4, 5]).astype('int32'))
  796. >>> output_x = mnp.unique(input_x)
  797. >>> print(output_x)
  798. [1, 2, 3, 4, 5]
  799. >>> output_x = mnp.unique(input_x, return_inverse=True)
  800. >>> print(output_x)
  801. (Tensor(shape=[5], dtype=Int32, value= [ 1, 2, 3, 4, 5]), Tensor(shape=[7], dtype=Int32,
  802. value= [0, 1, 1, 1, 2, 3, 4]))
  803. """
  804. if not _check_is_tensor(F.typeof(x)):
  805. _raise_type_error("Tensor is expected, but got", x)
  806. if F.tuple_len(F.shape(x)) > 1:
  807. x = ravel(x)
  808. uniq = UniqueNet()
  809. res = uniq(x)
  810. if not return_inverse:
  811. return res[0]
  812. return res