You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

array_creations.py 91 kB

5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """array operations, the function docs are adapted from Numpy API."""
  16. import math
  17. import operator
  18. import numpy as onp
  19. from ..common import Tensor
  20. from ..common import dtype as mstype
  21. from ..ops import operations as P
  22. from ..ops import functional as F
  23. from ..ops.primitive import constexpr
  24. from ..nn.layer.basic import tril as nn_tril
  25. from ..nn.layer.basic import triu as nn_triu
  26. from .._c_expression import Tensor as Tensor_
  27. from .utils import _check_input_for_asarray, _deep_list, _deep_tensor_to_nparray, \
  28. _check_input_tensor, _convert_64_to_32, _get_dtype_from_scalar, \
  29. _expand, _to_tensor, _slice_along_axis, _callable
  30. from .utils_const import _raise_value_error, _empty, _max, _min, \
  31. _check_same_type, _is_shape_empty, _check_shape, _check_dtype, _tile_size, _abs, \
  32. _raise_type_error, _expanded_shape, _check_is_float, _iota, _type_convert, \
  33. _canonicalize_axis, _list_comprehensions, _ceil, _tuple_slice, _raise_unimplemented_error, \
  34. _tuple_setitem
  35. from .array_ops import ravel, concatenate, broadcast_arrays, reshape, broadcast_to, flip, \
  36. apply_along_axis, where
  37. from .dtypes import nan, pi
# According to official numpy reference, the dimension of a numpy array must be less
# than 32
MAX_NUMPY_DIMS = 32
# All types that can be accepted as "array_like" parameters in graph mode.
ARRAY_TYPES = (int, float, bool, list, tuple, Tensor)
# Shared reduction primitives, constructed once with keep_dims=True so reduced
# axes are retained as size-1 dimensions.
_reduce_min_keepdims = P.ReduceMin(True)
_reduce_max_keepdims = P.ReduceMax(True)
_reduce_mean_keepdims = P.ReduceMean(True)
  46. def array(obj, dtype=None, copy=True, ndmin=0):
  47. """
  48. Creates a tensor.
  49. This function creates tensors from an array-like object.
  50. Args:
  51. obj (Union[int, float, bool, list, tuple]): Input data, in any form that
  52. can be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.
  53. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
  54. be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type
  55. of the new tensor will be inferred from obj. Default is :class:`None`.
  56. copy (bool): If `True`, then the object is copied. Otherwise, a copy will
  57. only be made if necessary. Default: `True`.
  58. ndmin (int): Specifies the minimum number of dimensions that the resulting
  59. tensor should have. Ones will be pre-pended to the shape as needed to
  60. meet this requirement. Default: 0
  61. Returns:
  62. Tensor, generated tensor with the specified dtype.
  63. Raises:
  64. TypeError: If input arguments have types not specified above.
  65. ValueError: If input `obj` has different sizes at different dimensions.
  66. Supported Platforms:
  67. ``Ascend`` ``GPU`` ``CPU``
  68. Examples:
  69. >>> import mindspore.numpy as np
  70. >>> print(np.array([1,2,3]))
  71. [1 2 3]
  72. """
  73. res = asarray(obj, dtype)
  74. if ndmin > res.ndim:
  75. if res.size == 0:
  76. _raise_value_error("Empty tensor cannot be expanded beyond the current dimension.")
  77. res = _expand(res, ndmin)
  78. if copy:
  79. res = copy_(res)
  80. elif dtype is not None and dtype != res.dtype:
  81. res = res.astype(dtype)
  82. return res
@constexpr
def asarray_const(a, dtype=None):
    """Converts the input to tensor. Note here `a` cannot be tensor itself.

    Runs at graph-compile time (constexpr). `a` may be a python scalar,
    bool, list, tuple, or numpy ndarray; Tensors are handled by `asarray`
    before this is reached.
    """
    _check_input_for_asarray(a)
    if dtype is not None:
        dtype = _check_dtype(dtype)
    # For bare scalars, infer the dtype directly from the python type.
    if isinstance(a, (float, int, bool)) and dtype is None:
        dtype = _get_dtype_from_scalar(a)
    if isinstance(a, (list, tuple)):
        # Convert all tuple/nested tuples to lists
        a = _deep_list(a)
        # Convert all tensor sub-elements to numpy arrays
        a = _deep_tensor_to_nparray(a)
        a = onp.asarray(a)
        # numpy falls back to dtype('object') for ragged nested sequences.
        if a.dtype is onp.dtype('object'):
            raise ValueError('Input array must have the same size across all dimensions.')
        # If dtype is not specified, we keep consistent with numpy decision
        # only exceptions are: we use int/float32
        if dtype is None:
            dtype = mstype.pytype_to_dtype(a.dtype)
            if dtype == mstype.float64:
                dtype = mstype.float32
            elif dtype == mstype.int64:
                dtype = mstype.int32
    # ndarray input (either passed in directly or produced above) with no
    # explicit dtype: take the dtype from the array and share its buffer.
    if isinstance(a, onp.ndarray) and dtype is None:
        if a.dtype is onp.dtype('object'):
            raise TypeError(f"For Tensor conversion, the input_data is {a} that contains unsupported element.")
        dtype = mstype.pytype_to_dtype(a.dtype)
        a = Tensor.from_numpy(a)
    return Tensor(a, dtype=dtype)
  113. def asarray(a, dtype=None):
  114. """
  115. Converts the input to tensor.
  116. This function converts tensors from an array-like object.
  117. Args:
  118. a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
  119. be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.
  120. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
  121. be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type
  122. of the new tensor will be inferred from obj. Default is :class:`None`.
  123. Returns:
  124. Tensor, generated tensor with the specified dtype.
  125. Raises:
  126. TypeError: If input arguments have types not specified above.
  127. ValueError: If input `a` has different sizes at different dimensions.
  128. Supported Platforms:
  129. ``Ascend`` ``GPU`` ``CPU``
  130. Examples:
  131. >>> import mindspore.numpy as np
  132. >>> print(np.asarray([1,2,3]))
  133. [1 2 3]
  134. """
  135. if isinstance(a, Tensor):
  136. if dtype is None or dtype == a.dtype:
  137. return a
  138. return a.astype(dtype)
  139. return asarray_const(a, dtype)
@constexpr
def asfarray_const(a, dtype=mstype.float32):
    """Converts the input to tensor. Note here `a` cannot be tensor itself.

    Compile-time (constexpr) counterpart of `asfarray`; `dtype` is expected
    to be a float mindspore dtype (the caller normalizes it).
    """
    _check_input_for_asarray(a)
    if isinstance(a, (list, tuple)):
        # Convert all tuple/nested tuples to lists
        a = _deep_list(a)
        # Convert all tensor sub-elements to numpy arrays
        a = _deep_tensor_to_nparray(a)
        a = onp.asarray(a)
        # numpy produces dtype('object') for ragged nested sequences.
        if a.dtype is onp.dtype('object'):
            raise ValueError(f"For Tensor conversion, the input_data is {a} that contains unsupported element.")
        a = Tensor.from_numpy(a)
    return Tensor(a, dtype)
  154. def asfarray(a, dtype=mstype.float32):
  155. """
  156. Similar to asarray, converts the input to a float tensor.
  157. If non-float dtype is defined, this function will return a float32 tensor instead.
  158. Args:
  159. a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
  160. be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.
  161. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
  162. be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type
  163. of the new tensor will be inferred from `a`. Default is :class:`mindspore.float32`.
  164. Returns:
  165. Tensor, generated tensor with the specified float dtype.
  166. Raises:
  167. TypeError: If input arguments have types not specified above.
  168. ValueError: If input `a` has different sizes at different dimensions.
  169. Supported Platforms:
  170. ``Ascend`` ``GPU`` ``CPU``
  171. Examples:
  172. >>> import mindspore.numpy as np
  173. >>> print(np.asfarray([1,2,3]))
  174. [1. 2. 3.]
  175. """
  176. if dtype is None:
  177. return asarray(a)
  178. dtype = _check_dtype(dtype)
  179. # pylint: disable=consider-using-in
  180. if dtype != mstype.float16 and dtype != mstype.float32 and dtype != mstype.float64:
  181. dtype = mstype.float32
  182. if isinstance(a, Tensor):
  183. return a.astype(dtype)
  184. return asfarray_const(a, dtype)
  185. def copy_(a):
  186. """
  187. Returns a tensor copy of the given object.
  188. Args:
  189. a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
  190. be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.
  191. Returns:
  192. Tensor, has the same data as `a`.
  193. Raises:
  194. TypeError: If input `a` has type not specified above.
  195. ValueError: If input `a` has different sizes at different dimensions.
  196. Supported Platforms:
  197. ``Ascend`` ``GPU`` ``CPU``
  198. Examples:
  199. >>> import mindspore.numpy as np
  200. >>> x = np.ones((2,2))
  201. >>> print(np.copy(x))
  202. [[1. 1.]
  203. [1. 1.]]
  204. """
  205. a = asarray(a)
  206. return a.copy()
  207. def ones(shape, dtype=mstype.float32):
  208. """
  209. Returns a new tensor of given shape and type, filled with ones.
  210. Args:
  211. shape (Union[int, tuple, list]): the shape of the new tensor.
  212. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
  213. Default is :class:`mstype.float32`.
  214. Returns:
  215. Tensor, with the designated `shape` and `dtype`, filled with ones.
  216. Raises:
  217. TypeError: If input arguments have types not specified above.
  218. ValueError: If `shape` entries have values :math:`< 0`.
  219. Supported Platforms:
  220. ``Ascend`` ``GPU`` ``CPU``
  221. Examples:
  222. >>> import mindspore.numpy as np
  223. >>> print(np.ones((2,2)))
  224. [[1. 1.]
  225. [1. 1.]]
  226. """
  227. shape = _check_shape(shape)
  228. dtype = _check_dtype(dtype)
  229. if _is_shape_empty(shape):
  230. return full(shape, 1.0, dtype)
  231. output = F.fill(dtype, shape, 1)
  232. return output
  233. def zeros(shape, dtype=mstype.float32):
  234. """
  235. Returns a new tensor of given shape and type, filled with zeros.
  236. Args:
  237. shape (Union[int, tuple, list]): the shape of the new tensor.
  238. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
  239. Default is :class:`mstype.float32`.
  240. Returns:
  241. Tensor, with the designated `shape` and `dtype`, filled with zeros.
  242. Raises:
  243. TypeError: If input arguments have types not specified above.
  244. ValueError: If `shape` entries have values :math:`< 0`.
  245. Supported Platforms:
  246. ``Ascend`` ``GPU`` ``CPU``
  247. Examples:
  248. >>> import mindspore.numpy as np
  249. >>> print(np.zeros((2,2)))
  250. [[0. 0.]
  251. [0. 0.]]
  252. """
  253. shape = _check_shape(shape)
  254. dtype = _check_dtype(dtype)
  255. if _is_shape_empty(shape):
  256. return full(shape, 0.0, dtype)
  257. output = F.fill(dtype, shape, 0)
  258. return output
def full(shape, fill_value, dtype=None):
    """
    Returns a new tensor of given shape and type, filled with `fill_value`.

    Args:
        shape (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g.,
            :math:`(2, 3)` or :math:`2`.
        fill_value (Union[int, float, bool, list, tuple]): Scalar or array_like
            fill value.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype,
            if `dtype` is :class:`None`, the data type of the new tensor will be inferred from
            `fill_value`. Default is :class:`None`.

    Returns:
        Tensor, with the designated shape and dtype, filled with `fill_value`.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `shape` has entries < 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.full((2,2), True))
        [[True True]
        [True True]]
    """
    shape = _check_shape(shape)
    if not isinstance(fill_value, ARRAY_TYPES):
        _raise_type_error("fill value should be int, float, bool, list, tuple, Tensor, but got", fill_value)
    if dtype is not None:
        dtype = _check_dtype(dtype)
    else:
        # Infer dtype from the fill value. NOTE(review): when fill_value is a
        # list/tuple, `dtype` stays None here; the broadcast path below then
        # lets asarray_const infer it, but the empty-shape path receives
        # dtype=None — confirm empty_compile accepts that.
        if isinstance(fill_value, (int, float, bool)):
            dtype = _get_dtype_from_scalar(fill_value)
        if isinstance(fill_value, Tensor):
            dtype = fill_value.dtype
    if not _is_shape_empty(shape):
        if isinstance(fill_value, (int, float, bool)):
            return F.fill(dtype, shape, fill_value)
        # NOTE(review): for list/tuple/Tensor fill values, an explicitly
        # passed `dtype` is not applied to the broadcast result — verify
        # whether callers rely on the fill value's own dtype winning here.
        if isinstance(fill_value, (list, tuple)):
            fill_value = asarray_const(fill_value)
        return broadcast_to(fill_value, shape)
    # if shape contains zero, use c.Tensor()
    return _convert_64_to_32(empty_compile(dtype, shape))
def arange(start, stop=None, step=None, dtype=None):
    """
    Returns evenly spaced values within a given interval.

    Args:
        start(Union[int, float]): Start of interval. The interval includes this value.
            When `stop` is provided as a position argument, `start` must be given, when `stop`
            is a normal argument, `start` can be optional, and default is 0.
            Please see additional examples below.
        stop(Union[int, float], optional): End of interval. The interval does not
            include this value, except in some cases where `step` is not an integer
            and floating point round-off affects the length of out.
        step(Union[int, float], optional): Spacing between values. For any output
            `out`, this is the distance between two adjacent values, :math:`out[i+1] - out[i]`.
            The default step size is 1. If `step` is specified as a position argument,
            `start` must also be given.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
            If dtype is None, the data type of the new tensor will be inferred from start,
            stop and step. Default is None.

    Returns:
        Tensor with evenly spaced values.

    Raises:
        TypeError(PyNative Mode) or RuntimeError(Graph Mode): If input arguments
            have types not specified above, or arguments are not given in the correct
            orders specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.arange(0, 5, 1))
        [0 1 2 3 4]
        >>> print(np.arange(3))
        [0 1 2]
        >>> print(np.arange(start=0, stop=3))
        [0 1 2]
        >>> print(np.arange(0, stop=3, step=0.5))
        [0. 0.5 1. 1.5 2. 2.5]
        >>> print(np.arange(stop=3)) # This will lead to TypeError
    """
    # This implementation was inspired by jax.numpy.arange
    # infer the dtype
    if dtype is None:
        dtype = _get_dtype_from_scalar(start, stop, step)
    # Computation is done in float32 (_iota produces the ramp) and only cast
    # to the target dtype at the end.
    if stop is None and step is None:  # (start, stop, step) -> (0, start, 1)
        num = _ceil(start)
        out = _iota(mstype.float32, num)
    elif step is None:  # (start, stop, step) -> (start, stop, 1)
        num = _ceil(stop - start)
        out = _iota(mstype.float32, num) + start
    elif stop is None:  # (start, stop, step) -> (0, start, step)
        # `+ 0.0` forces float division so _ceil sees a float ratio.
        num = _ceil((start + 0.0) / step)
        out = _iota(mstype.float32, num) * step
    else:
        num = _ceil((stop - start + 0.0) / step)
        out = _iota(mstype.float32, num) * step + start
    return out.astype(dtype)
  356. def _type_checking_for_xspace(start, stop, num, endpoint, dtype, axis):
  357. """utility parameter checking function for linspace, logspace, geomspace."""
  358. if not isinstance(start, ARRAY_TYPES):
  359. _raise_type_error("start should be int, float, bool, list, tuple, Tensor, but got", start)
  360. if not isinstance(stop, ARRAY_TYPES):
  361. _raise_type_error("end should be int, float, bool, list, tuple, Tensor, but got", stop)
  362. if not isinstance(start, Tensor):
  363. start = _type_convert(Tensor, start).astype(mstype.float32)
  364. if not isinstance(stop, Tensor):
  365. stop = _type_convert(Tensor, stop).astype(mstype.float32)
  366. if not isinstance(num, int):
  367. _raise_type_error("num should be an integer, but got ", num)
  368. if not isinstance(endpoint, bool):
  369. _raise_type_error("endpoint should be an boolean, but got ", endpoint)
  370. if dtype is not None:
  371. dtype = _check_dtype(dtype)
  372. else:
  373. dtype = mstype.float32
  374. start, stop = broadcast_arrays(start, stop)
  375. axis = _canonicalize_axis(axis, start.ndim+1)
  376. return start, stop, num, endpoint, dtype, axis
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0):
    """
    Returns evenly spaced values within a given interval.

    Args:
        start (Union[int, list(int), tuple(int), tensor]): The starting value of the sequence.
        stop (Union[int, list(int), tuple(int), tensor]): The end value of the sequence,
            unless `endpoint` is set to False. In that case, the sequence consists
            of all but the last of `num + 1` evenly spaced samples, so that `stop`
            is excluded. Note that the step size changes when `endpoint` is False.
        num (int, optional): Number of samples to generate. Default is 50.
        endpoint (bool, optional): If True, `stop` is the last sample. Otherwise, it is
            not included. Default is True.
        retstep (bool, optional): If True, return (`samples`, `step`), where `step` is
            the spacing between samples.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype,
            If `dtype` is None, infer the data type from other input arguments. Default is None.
        axis (int, optional): The axis in the result to store the samples. Relevant
            only if start or stop are array-like. By default :math:`(0)`, the samples will
            be along a new axis inserted at the beginning. Use :math:`-1` to get an axis at the end.
            Default is :math:`0`.

    Returns:
        Tensor, with `num` equally spaced samples in the closed interval
        :math:`[start, stop]` or the half-open interval :math:`[start, stop)`
        (depending on whether `endpoint` is True or False).
        Step, the size of spacing between samples, only returned if `retstep` is True.

    Raises:
        TypeError: If input arguments have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.linspace(0, 5, 6))
        [0. 1. 2. 3. 4. 5.]
    """
    # This implementation was inspired by jax.numpy.linspace and numpy.linspace
    start, stop, num, endpoint, dtype, axis = _type_checking_for_xspace(start, stop, num, endpoint, dtype, axis)
    if not isinstance(retstep, bool):
        _raise_type_error("retstep should be an boolean, but got ", retstep)
    # bounds_shape: broadcast endpoint shape with a size-1 slot inserted at
    # `axis`, so endpoints broadcast against the sample ramp below.
    bounds_shape = start.shape
    bounds_shape = _tuple_slice(bounds_shape, None, axis) + (1,) + _tuple_slice(bounds_shape, axis, None)
    # iota_shape: all-ones shape with `num` at `axis` — the shape of the
    # 0..num-1 ramp after insertion along the sample axis.
    iota_shape = _list_comprehensions(start.ndim + 1, 1, True)
    iota_shape = _tuple_slice(iota_shape, None, axis) + (num,) + _tuple_slice(iota_shape, axis + 1, None)
    num_tensor = _type_convert(Tensor, num).astype(mstype.float32)
    # With endpoint included, num samples span (num - 1) intervals.
    div = (num_tensor - 1) if endpoint else num_tensor
    out = None
    delta = None
    if num > 1:
        delta = (stop - start) / div
        # This is similar to how numpy and jax compute linspace
        start_expand = reshape(start, bounds_shape)
        incremental_expand = reshape(_iota(mstype.float32, num), iota_shape)
        delta_expand = reshape(delta, bounds_shape)
        start_expand, incremental_expand, delta_expand = broadcast_arrays(
            start_expand, incremental_expand, delta_expand)
        out = start_expand + (incremental_expand * delta_expand)
    elif num == 1:
        # Single sample: the step is undefined (nan) when the endpoint is
        # included, matching numpy's retstep behavior.
        delta = nan if endpoint else stop - start
        out = reshape(start, bounds_shape)
    else:  # num == 0
        _raise_value_error("cannot support Tensor with num=0.")
    if retstep:
        return out.astype(dtype), delta
    return out.astype(dtype)
  440. def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
  441. """
  442. Returns numbers spaced evenly on a log scale.
  443. In linear space, the sequence starts at base ** start (base to the power of
  444. start) and ends with base ** stop (see endpoint below).
  445. Args:
  446. start (Union[int, list(int), tuple(int), tensor]): ``base ** start`` is the starting
  447. value of the sequence.
  448. stop (Union[int, list(int), tuple(int), tensor]): ``base ** stop`` is the final value of
  449. the sequence, unless `endpoint` is False. In that case, ``num + 1`` values are spaced
  450. over the interval in log-space, of which all but the last (a sequence of length num)
  451. are returned.
  452. num (int, optional): Number of samples to generate. Default is 50.
  453. endpoint (bool, optional): If True, `stop` is the last sample. Otherwise, it is
  454. not included. Default is True.
  455. base (Union[int, float], optional): The base of the log space. The step size
  456. between the elements in :math:`ln(samples) / ln(base)` (or :math:`log_{base}(samples)`)
  457. is uniform. Default is :math:`10.0`.
  458. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
  459. If `dtype` is None, infer the data type from other input arguments. Default is None.
  460. axis (int, optional): The axis in the result to store the samples. Relevant
  461. only if start or stop is array-like. By default (:math:`0`), the samples will
  462. be along a new axis inserted at the beginning. Use :math:`-1` to get an axis at the end.
  463. Default is :math:`0`.
  464. Returns:
  465. Tensor, equally spaced on a log scale.
  466. Raises:
  467. TypeError: If input arguments have types not specified above.
  468. Supported Platforms:
  469. ``Ascend`` ``GPU`` ``CPU``
  470. Examples:
  471. >>> import mindspore.numpy as np
  472. >>> print(np.logspace(0, 5, 6, base=2.0))
  473. [ 1. 2. 4. 8. 16. 32.]
  474. """
  475. # This implementation was inspired by jax.numpy.linspace and numpy.linspace
  476. start, stop, num, endpoint, dtype, axis = _type_checking_for_xspace(start, stop, num, endpoint, dtype, axis)
  477. if not isinstance(base, (int, float, bool)):
  478. _raise_type_error("base should be a number, but got ", base)
  479. linspace_res = linspace(start, stop, num, endpoint=endpoint, retstep=False, dtype=None, axis=axis)
  480. return F.tensor_pow(base, linspace_res).astype(dtype)
  481. def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
  482. """
  483. Returns numbers spaced evenly on a log scale (a geometric progression).
  484. This is similar to logspace, but with endpoints specified directly. Each output sample
  485. is a constant multiple of the previous.
  486. Args:
  487. start (Union[int, list(int), tuple(int), tensor]): The starting value of the sequence.
  488. stop (Union[int, list(int), tuple(int), tensor]): The final value of the sequence,
  489. unless endpoint is False. In that case, num + 1 values are spaced over the
  490. interval in log-space, of which all but the last (a sequence of length num) are
  491. returned.
  492. num (int, optional): Number of samples to generate. Default is 50.
  493. endpoint (bool, optional): If True, `stop` is the last sample. Otherwise, it is
  494. not included. Default is True.
  495. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
  496. be in format of np.float32, or `float32`.If `dtype` is None, infer the data
  497. type from other input arguments. Default is None.
  498. axis (int, optional): The axis in the result to store the samples. Relevant
  499. only if start or stop is array-like. By default (0), the samples will
  500. be along a new axis inserted at the beginning. Use -1 to get an axis at the end.
  501. Default is 0.
  502. Returns:
  503. Tensor, with samples equally spaced on a log scale.
  504. Raises:
  505. TypeError: If input arguments have types not specified above.
  506. Supported Platforms:
  507. ``Ascend`` ``GPU`` ``CPU``
  508. Examples:
  509. >>> output = np.geomspace(1, 256, num=9)
  510. >>> print(output)
  511. [ 1. 2. 4. 8. 16. 32. 64. 128. 256.]
  512. >>> output = np.geomspace(1, 256, num=8, endpoint=False)
  513. >>> print(output)
  514. [ 1. 2. 4. 8. 16. 32. 64. 128.]
  515. """
  516. start, stop, num, endpoint, dtype, axis = _type_checking_for_xspace(start, stop, num, endpoint, dtype, axis)
  517. root = num
  518. if endpoint:
  519. root -= 1
  520. bases = F.tensor_pow(F.tensor_div(stop, start), asarray_const(1./(root)))
  521. exponents = linspace(zeros(F.shape(bases)), F.fill(F.dtype(bases), F.shape(bases), root),
  522. num, endpoint=endpoint, dtype=dtype, axis=axis)
  523. shape = F.shape(bases)
  524. axis = axis + F.rank(bases) + 1 if axis < 0 else axis
  525. expanded_shape = _tuple_slice(shape, None, axis) + (1,) + _tuple_slice(shape, axis, None)
  526. bases = F.reshape(bases, expanded_shape)
  527. start = F.reshape(start, expanded_shape)
  528. res = F.tensor_mul(F.tensor_pow(bases, exponents), start)
  529. if dtype is not None:
  530. res = F.cast(res, dtype)
  531. return res
  532. def eye(N, M=None, k=0, dtype=mstype.float32):
  533. """
  534. Returns a 2-D tensor with ones on the diagnoal and zeros elsewhere.
  535. Args:
  536. N (int): Number of rows in the output, must be larger than 0.
  537. M (int, optional): Number of columns in the output. If is :class:`None`, defaults to `N`,
  538. if defined, must be larger than 0. Deault is :class:`None`.
  539. k (int, optional): Index of the diagonal: 0 (the default) refers to the main
  540. diagonal, a positive value refers to an upper diagonal, and a negative value
  541. to a lower diagonal. Default is 0.
  542. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
  543. Default is mstype.float32.
  544. Returns:
  545. A tensor of shape (N, M). A tensor where all elements are equal to zero,
  546. except for the k-th diagonal, whose values are equal to one.
  547. Raises:
  548. TypeError: If input arguments have types not specified above.
  549. Supported Platforms:
  550. ``Ascend`` ``GPU`` ``CPU``
  551. Examples:
  552. >>> import mindspore.numpy as np
  553. >>> print(np.eye(2, 2))
  554. [[1. 0.]
  555. [0. 1.]]
  556. """
  557. dtype = _check_dtype(dtype)
  558. if M is None:
  559. M = N
  560. if not (isinstance(M, int) and isinstance(N, int) and isinstance(k, int)):
  561. _raise_type_error("Input tensor dimensions should be integers.")
  562. out = None
  563. if N == 0 or M == 0:
  564. # Fill the shape with any value is fine.
  565. return full((N, M), 0, dtype)
  566. out = F.eye(N, M, dtype)
  567. if k >= M or k <= -N:
  568. return full((N, M), 0, dtype)
  569. if k != 0:
  570. out = out.astype(mstype.float32)
  571. if k > 0:
  572. out_left = full((N, k), 0, dtype)
  573. out_right = out[..., 0:M-k:1]
  574. return concatenate((out_left, out_right), 1).astype(dtype)
  575. if k < 0:
  576. out_upper = full((-k, M), 0, dtype)
  577. out_lower = out[0:N+k:1, ...]
  578. return concatenate((out_upper, out_lower), 0).astype(dtype)
  579. return out
  580. def identity(n, dtype=mstype.float32):
  581. """
  582. Returns the identity tensor.
  583. Args:
  584. n (int): Number of rows and columns in the output, must be larger than 0.
  585. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype,
  586. default is :class:`mstype.float32`.
  587. Returns:
  588. A tensor of shape `(n, n)`, where all elements are equal to zero,
  589. except for the diagonal, whose values are equal to one.
  590. Supported Platforms:
  591. ``Ascend`` ``GPU`` ``CPU``
  592. Raises:
  593. TypeError: If input arguments have types not specified above.
  594. Examples:
  595. >>> import mindspore.numpy as np
  596. >>> print(np.identity(2))
  597. [[1. 0.]
  598. [0. 1.]]
  599. """
  600. if not isinstance(n, int):
  601. _raise_type_error("Input tensor dimensions should be integers.")
  602. dtype = _check_dtype(dtype)
  603. return eye(n, dtype=dtype)
@constexpr
def empty_compile(dtype, shape):
    """Create an uninitialized Tensor_ of the given dtype and shape.

    Decorated with `constexpr`, so the tensor is produced as a constant
    during graph compilation rather than at runtime.
    """
    return Tensor_(dtype, shape)
  607. def empty(shape, dtype=mstype.float32):
  608. """
  609. Returns a new array of given shape and type, without initializing
  610. entries.
  611. Note:
  612. Numpy argument `order` is not supported.
  613. Object arrays are not supported.
  614. Args:
  615. shape (Union[int, tuple(int)]): Shape of the empty array, e.g.,
  616. (2, 3) or 2.
  617. dtype (:class:`mindspore.dtype`, optional): Desired output data-type for the
  618. array, e.g, mstype.int8. Default is mstype.float32.
  619. Returns:
  620. Tensor, array of uninitialized (arbitrary) data of the given
  621. shape and dtype.
  622. Raises:
  623. TypeError: if the input shape or dtype is invalid.
  624. Supported Platforms:
  625. ``Ascend`` ``GPU`` ``CPU``
  626. Examples:
  627. >>> import mindspore.numpy as np
  628. >>> output = np.empty((2, 3))
  629. >>> print(output)
  630. # result may vary
  631. Tensor(shape=[2, 3], dtype=Float32, value=
  632. <uninitialized>)
  633. """
  634. shape = _check_shape(shape)
  635. dtype = _check_dtype(dtype)
  636. return empty_compile(dtype, shape)
  637. def _get_shape(array_like):
  638. """Returns the shape of the array like object."""
  639. if isinstance(array_like, Tensor):
  640. return array_like.shape
  641. return asarray_const(array_like).shape
  642. def _get_dtype(array_like):
  643. """Returns the data type of the array like object."""
  644. if isinstance(array_like, Tensor):
  645. return array_like.dtype
  646. return asarray_const(array_like).dtype
  647. def _x_like(prototype, dtype, shape, constructor, fill_value=None):
  648. """
  649. Returns a tensor with the same shape and type as prototype,
  650. using constructor.
  651. """
  652. if not isinstance(prototype, ARRAY_TYPES):
  653. _raise_type_error("prototype should be int, float, bool, list, tuple, Tensor, but got", prototype)
  654. dtype_out = dtype
  655. shape_out = shape
  656. if dtype_out is None:
  657. dtype_out = _get_dtype(prototype)
  658. if shape_out is None or isinstance(shape_out, (list, tuple)) and not shape_out:
  659. shape_out = _get_shape(prototype)
  660. if fill_value is not None:
  661. return constructor(shape_out, fill_value, dtype_out)
  662. return constructor(shape_out, dtype_out)
  663. def empty_like(prototype, dtype=None, shape=None):
  664. """
  665. Returns a new array with the same shape and type as a given array.
  666. Note:
  667. Input array must have the same size across a dimension.
  668. If `prototype` is not a Tensor, dtype is float32 by default if not provided.
  669. Args:
  670. prototype (Union[Tensor, list, tuple]): The shape and data-type of `prototype`
  671. define these same attributes of the returned array.
  672. dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
  673. result.
  674. shape (int or sequence of ints, optional): Overrides the shape
  675. of the result.
  676. Returns:
  677. Tensor, array of uninitialized (arbitrary) data with the same
  678. shape and type as `prototype`.
  679. Raises:
  680. ValueError: if `prototype` is not a Tensor, list or tuple.
  681. Supported Platforms:
  682. ``Ascend`` ``GPU`` ``CPU``
  683. Examples:
  684. >>> import mindspore.numpy as np
  685. >>> a = np.ones((4,1,2))
  686. >>> output = np.empty_like(a)
  687. >>> print(output)
  688. # result may vary
  689. Tensor(shape=[4, 1, 2], dtype=Float32, value=
  690. <uninitialized>)
  691. """
  692. return _x_like(prototype, dtype, shape, empty)
  693. def ones_like(a, dtype=None, shape=None):
  694. """
  695. Returns an array of ones with the same shape and type as a given array.
  696. Note:
  697. Input array must have the same size across a dimension.
  698. If `a` is not a Tensor, dtype is float32 by default if not provided.
  699. Args:
  700. a (Union[Tensor, list, tuple]): The shape and data-type of a define these same
  701. attributes of the returned array.
  702. dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
  703. result.
  704. shape (int or sequence of ints, optional): Overrides the shape
  705. of the result.
  706. Returns:
  707. Tensor, array of ones with the same shape and type as `a`.
  708. Raises:
  709. ValueError: if `a` is not a Tensor, list or tuple.
  710. Supported Platforms:
  711. ``Ascend`` ``GPU`` ``CPU``
  712. Examples:
  713. >>> import mindspore.numpy as np
  714. >>> a = np.ones((4,1,2))
  715. >>> output = np.ones_like(a)
  716. >>> print(output)
  717. [[[1. 1.]]
  718. [[1. 1.]]
  719. [[1. 1.]]
  720. [[1. 1.]]]
  721. """
  722. return _x_like(a, dtype, shape, ones)
  723. def zeros_like(a, dtype=None, shape=None):
  724. """
  725. Returns an array of zeros with the same shape and type as a given array.
  726. Note:
  727. Input array must have the same size across a dimension.
  728. If `a` is not a Tensor, dtype is float32 by default if not provided.
  729. Args:
  730. a (Union[Tensor, list, tuple]): The shape and data-type of a define these same
  731. attributes of the returned array.
  732. dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
  733. result.
  734. shape (int or sequence of ints, optional): Overrides the shape
  735. of the result.
  736. Returns:
  737. Tensor, array of zeros with the same shape and type as `a`.
  738. Raises:
  739. ValueError: if `a` is not a Tensor, list or tuple.
  740. Supported Platforms:
  741. ``Ascend`` ``GPU`` ``CPU``
  742. Examples:
  743. >>> import mindspore.numpy as np
  744. >>> a = np.ones((4,1,2))
  745. >>> output = np.zeros_like(a)
  746. >>> print(output)
  747. [[[0. 0.]]
  748. [[0. 0.]]
  749. [[0. 0.]]
  750. [[0. 0.]]]
  751. """
  752. return _x_like(a, dtype, shape, zeros)
  753. def full_like(a, fill_value, dtype=None, shape=None):
  754. """
  755. Returns a full array with the same shape and type as a given array.
  756. Note:
  757. Input array must have the same size across a dimension.
  758. If `a` is not a Tensor, dtype is float32 by default if not provided.
  759. Args:
  760. a (Union[Tensor, list, tuple]): The shape and data-type of `a` define these same
  761. attributes of the returned array.
  762. fill_value (scalar): Fill value.
  763. dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
  764. result.
  765. shape (int or sequence of ints, optional): Overrides the shape
  766. of the result.
  767. Returns:
  768. Tensor, array of fill_value with the same shape and type as `a`.
  769. Raises:
  770. ValueError: if `a` is not a Tensor, list or tuple.
  771. Supported Platforms:
  772. ``Ascend`` ``GPU`` ``CPU``
  773. Examples:
  774. >>> import mindspore.numpy as np
  775. >>> a = np.ones((4,1,2))
  776. >>> output = np.full_like(a, 0.5)
  777. >>> print(output)
  778. [[[0.5 0.5]]
  779. [[0.5 0.5]]
  780. [[0.5 0.5]]
  781. [[0.5 0.5]]]
  782. """
  783. return _x_like(a, dtype, shape, full, fill_value=fill_value)
  784. def tri(N, M=None, k=0, dtype=mstype.float32):
  785. """
  786. Returns a tensor with ones at and below the given diagonal and zeros elsewhere.
  787. Args:
  788. N(int): Number of rows in the array.
  789. M(int, optional): Number of columns in the array. By default, `M` is taken
  790. equal to N.
  791. k(int, optional): The sub-diagonal at and below which the array is filled.
  792. :math:`k = 0` is the main diagonal, while :math:`k < 0` is below it, and :math:`k > 0` is above.
  793. The default is 0.
  794. dtype(:class:`mindspore.dtype`, optional): Data type of the returned array. The default
  795. is :class:`mindspore.dtype`.
  796. Returns:
  797. Tensor with shape `(N, M)`, with its lower triangle filled with
  798. ones and zeros elsewhere; in other words :math:`T[i,j] = 1` for :math:`j <= i + k`,
  799. :math:`0` otherwise.
  800. Raises:
  801. TypeError: If input arguments have types not specified above.
  802. Supported Platforms:
  803. ``Ascend`` ``GPU`` ``CPU``
  804. Examples:
  805. >>> import mindspore.numpy as np
  806. >>> output = np.tri(3, 3, 1)
  807. >>> print(output)
  808. [[1. 1. 0.]
  809. [1. 1. 1.]
  810. [1. 1. 1.]]
  811. """
  812. if M is None:
  813. M = N
  814. return nn_tril((N, M), dtype, k)
  815. def tril(m, k=0):
  816. """
  817. Returns a lower triangle of a tensor.
  818. Returns a copy of a tensor with elements above the `k-th` diagonal zeroed.
  819. Args:
  820. m (Union[Tensor, list, tuple]): The shape and data-type of `m` define these same
  821. attributes of the returned tensor.
  822. k (int, optional): Diagonal above which to zero elements. :math:`k = 0` (the default)
  823. is the main diagonal, :math:`k < 0` is below it and :math:`k > 0` is above.
  824. Returns:
  825. Lower triangle of `m`, of same shape and data-type as `m`.
  826. Supported Platforms:
  827. ``Ascend`` ``GPU`` ``CPU``
  828. Raises:
  829. TypeError: If input arguments have types not specified above.
  830. ValueError: If input `m`\'s rank :math:`< 1`.
  831. Examples:
  832. >>> import mindspore.numpy as np
  833. >>> output = np.tril(np.ones((3, 3)))
  834. >>> print(output)
  835. [[1. 0. 0.]
  836. [1. 1. 0.]
  837. [1. 1. 1.]]
  838. """
  839. if not isinstance(m, Tensor):
  840. m = asarray_const(m)
  841. dtype = m.dtype
  842. m = m.astype(mstype.float32)
  843. assist = nn_tril(m.shape, mstype.float32, k)
  844. return F.tensor_mul(assist, m).astype(dtype)
  845. def triu(m, k=0):
  846. """
  847. Returns an upper triangle of a tensor.
  848. Returns a copy of a tensor with elements below the `k-th` diagonal zeroed.
  849. Args:
  850. m (Union[Tensor, list, tuple]): The shape and data-type of `m` define these same
  851. attributes of the returned tensor.
  852. k (int, optional): Diagonal below which to zero elements. :math:`k = 0` (the default)
  853. is the main diagonal, :math:`k < 0` is below it and :math:`k > 0` is above.
  854. Returns:
  855. Upper triangle of `m`, of same shape and data-type as `m`.
  856. Raises:
  857. TypeError: If input arguments have types not specified above.
  858. ValueError: If input `m`\'s rank < 1.
  859. Supported Platforms:
  860. ``Ascend`` ``GPU`` ``CPU``
  861. Examples:
  862. >>> import mindspore.numpy as np
  863. >>> output = np.triu(np.ones((3, 3)))
  864. >>> print(output)
  865. [[1. 1. 1.]
  866. [0. 1. 1.]
  867. [0. 0. 1.]]
  868. """
  869. if not isinstance(m, Tensor):
  870. m = asarray_const(m)
  871. dtype = m.dtype
  872. m = m.astype(mstype.float32)
  873. assist = nn_triu(m.shape, mstype.float32, k)
  874. return F.tensor_mul(assist, m).astype(dtype)
def diagonal(a, offset=0, axis1=0, axis2=1):
    """
    Returns specified diagonals.

    If `a` is 2-D, returns the diagonal of `a` with the given offset, i.e., the
    collection of elements of the form ``a[i, i+offset]``. If `a` has more than two
    dimensions, then the axes specified by `axis1` and `axis2` are used to determine
    the 2-D sub-array whose diagonal is returned. The shape of the resulting
    array can be determined by removing `axis1` and `axis2` and appending an index
    to the right equal to the size of the resulting diagonals.

    Args:
        a (Tensor): Array from which the diagonals are taken.
        offset (int, optional): Offset of the diagonal from the main diagonal.
            Can be positive or negative. Defaults to main diagonal.
        axis1 (int, optional): Axis to be used as the first axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            first axis (0).
        axis2 (int, optional): Axis to be used as the second axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            second axis.

    Returns:
        Tensor, if `a` is 2-D, then `a` 1-D array containing the diagonal. If
        ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` are removed,
        and a new axis inserted at the end corresponding to the diagonal.

    Raises:
        ValueError: if the input tensor has less than two dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(4).reshape(2,2)
        >>> print(a)
        [[0 1]
         [2 3]]
        >>> output = np.diagonal(a)
        >>> print(output)
        [0 3]
        >>> output = np.diagonal(a, 1)
        >>> print(output)
        [1]
        >>> a = np.arange(8).reshape(2, 2, 2)
        >>> print(a)
        [[[0 1]
          [2 3]]
         [[4 5]
          [6 7]]]
        >>> output = np.diagonal(a, 0, 0, 1)
        >>> print(output)
        [[0 6]
         [1 7]]
    """
    # Thin wrapper: delegate directly to the Tensor method.
    return a.diagonal(offset=offset, axis1=axis1, axis2=axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None):
    """
    Returns the sum along diagonals of the array.

    If `a` is 2-D, the sum along its diagonal with the given offset is returned,
    i.e., the sum of elements ``a[i,i+offset]`` for all `i`.
    If `a` has more than two dimensions, then the axes specified by `axis1` and
    `axis2` are used to determine the 2-D sub-arrays whose traces are returned.
    The shape of the resulting array is the same as that of a with `axis1` and
    `axis2` removed.

    Note:
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        a (Tensor): Array from which the diagonals are taken.
        offset (int, optional): Offset of the diagonal from the main diagonal.
            Can be positive or negative. Defaults to main diagonal.
        axis1 (int, optional): Axis to be used as the first axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            first axis (0).
        axis2 (int, optional): Axis to be used as the second axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            second axis.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, sum_along_diagonals. If `a` is 2-D, the sum along the diagonal
        is returned. If `a` has larger dimensions, then an array of sums along
        diagonals is returned.

    Raises:
        ValueError: if the input tensor has less than two dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.trace(np.eye(3))
        >>> print(output)
        3.0
        >>> a = np.arange(8).reshape((2,2,2))
        >>> output = np.trace(a)
        >>> print(output)
        [6 8]
        >>> a = np.arange(24).reshape((2,2,2,3))
        >>> output = np.trace(a).shape
        >>> print(output)
        (2, 3)
    """
    # Thin wrapper: delegate directly to the Tensor method.
    return a.trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
  973. def _index(i, size, Cartesian=True):
  974. """If Cartesian=True, index 0 is swapped with index 1."""
  975. if Cartesian:
  976. if i == 1:
  977. return 0
  978. if i == 0 and size >= 2:
  979. return 1
  980. return i
def meshgrid(*xi, sparse=False, indexing='xy'):
    """
    Returns coordinate matrices from coordinate vectors.

    Make `N-D` coordinate arrays for vectorized evaluations of `N-D`
    scalar/vector fields over `N-D` grids, given one-dimensional
    coordinate arrays `x1, x2,..., xn`.

    Note:
        Numpy argument copy is not supported, and a copy is always
        returned.

    Args:
        *xi (Tensor): 1-D arrays representing the coordinates
            of a grid.
        indexing ('xy', 'ij', optional): Cartesian ('xy', default) or
            matrix ('ij') indexing of output. In the 2-D case with
            inputs of length `M` and `N`, the outputs are of shape `(N, M)`
            for 'xy' indexing and `(M, N)` for 'ij' indexing. In the 3-D
            case with inputs of length `M`, `N` and `P`, outputs are of shape
            `(N, M, P)` for 'xy' indexing and `(M, N, P)` for 'ij' indexing.
        sparse (bool, optional): If True a sparse grid is returned in
            order to conserve memory. Default is False.

    Returns:
        Tuple of tensors, for vectors `x1, x2,..., xn` with lengths
        ``Ni=len(xi)``, return `(N1, N2, N3,...Nn)` shaped arrays if
        ``indexing='ij'`` or `(N2, N1, N3,...Nn)` shaped arrays if
        ``indexing='xy'`` with the elements of `xi` repeated to fill the matrix
        along the first dimension for `x1`, the second for `x2` and so on.

    Raises:
        TypeError: if the input is not a tensor, or sparse is not boolean, or
            indexing is not 'xy' or 'ij'.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.linspace(0, 1, 3)
        >>> y = np.linspace(0, 1, 2)
        >>> xv, yv = np.meshgrid(x, y)
        >>> print(xv)
        [[0.  0.5 1. ]
         [0.  0.5 1. ]]
        >>> print(yv)
        [[0. 0. 0.]
         [1. 1. 1.]]
        >>> xv, yv = np.meshgrid(x, y, sparse=True)
        >>> print(xv)
        [[0.  0.5 1. ]]
        >>> print(yv)
        [[0.]
         [1.]]
    """
    _check_input_tensor(*xi)
    if not isinstance(sparse, bool):
        _raise_type_error('argument sparse should be boolean')
    if indexing not in ('xy', 'ij'):
        _raise_type_error("Valid values for `indexing` are 'xy' and 'ij'.")
    # If any input has zero elements, the output grid is empty.
    shape_out = ()
    for x in xi:
        shape_out += (x.size,)
    if _is_shape_empty(shape_out):
        return ones(shape_out)
    # Flatten any input that is not already 1-D.
    grids = []
    for x in xi:
        if F.rank(x) == 1:
            grids.append(x)
        else:
            grids.append(ravel(x))
    ndim = len(grids)
    # With 'xy' (Cartesian) indexing the first two axes are swapped;
    # _index performs that axis remapping.
    Cartesian = indexing == 'xy'
    shape_out = ()
    for i in range(len(grids)):
        grid_index = _index(i, ndim, Cartesian=Cartesian)
        shape_out += (F.shape(grids[grid_index])[0],)
    # Reshape each 1-D grid so its data lies along its own axis, then
    # (for dense output) tile it to the full output shape.
    res = []
    for i, x in enumerate(grids):
        grid_index = _index(i, ndim, Cartesian=Cartesian)
        shape_expanded = _expanded_shape(ndim, shape_out[grid_index], grid_index)
        x = x.reshape(shape_expanded)
        if not sparse:
            x = F.tile(x, _tile_size(shape_expanded, shape_out, ndim))
        res.append(x)
    # NOTE(review): returns a list, although the docstring says tuple — confirm
    # whether callers rely on the list type before changing.
    return res
  1061. class nd_grid:
  1062. """
  1063. Construct a multi-dimensional "meshgrid".
  1064. ``grid = nd_grid()`` creates an instance which will return a mesh-grid
  1065. when indexed.
  1066. If instantiated with an argument of ``sparse=True``, the mesh-grid is
  1067. open (or not fleshed out) so that only one-dimension of each
  1068. returned argument is greater than 1.
  1069. Args:
  1070. sparse (bool): Whether the grid is sparse or not. Default is
  1071. False.
  1072. Returns:
  1073. Tensor or tuple of tensor, a meshgrid. If ``sparse=False``, returns
  1074. tensors are all of the same dimensions; and if ``sparse=True``,
  1075. returns tensors with only one dimension not equal to `1`.
  1076. """
  1077. def __init__(self, sparse=False):
  1078. self.sparse = sparse
  1079. def __getitem__(self, keys):
  1080. if isinstance(keys, slice):
  1081. keys = (keys,)
  1082. xi = []
  1083. for k in keys:
  1084. if not isinstance(k.start, int) or not isinstance(k.stop, int):
  1085. _raise_type_error('slice indices must be integers')
  1086. if k.step:
  1087. step = k.step
  1088. else:
  1089. step = 1
  1090. if isinstance(step, complex):
  1091. v = linspace(k.start, k.stop, int(abs(step)))
  1092. else:
  1093. v = arange(k.start, k.stop, step)
  1094. xi.append(v)
  1095. grids = meshgrid(*xi, sparse=self.sparse, indexing='ij')
  1096. if len(grids) == 1:
  1097. return grids[0]
  1098. if self.sparse:
  1099. return grids
  1100. if isinstance(grids, Tensor_):
  1101. return grids
  1102. expanded = []
  1103. for grid in grids:
  1104. expanded.append(F.expand_dims(grid, 0))
  1105. res = concatenate(tuple(expanded))
  1106. return res
  1107. class mGridClass(nd_grid):
  1108. """
  1109. mgrid is an :class:`nd_grid` instance with ``sparse=False``.
  1110. The dimension and number of the output arrays are equal to the number
  1111. of indexing dimensions. If the step length is not a complex number,
  1112. then the stop is not inclusive. However, if the step length is a complex
  1113. number (e.g. 5j), then the integer part of its magnitude is interpreted
  1114. as specifying the number of points to create between the start and
  1115. stop values, where the stop value is inclusive.
  1116. Note:
  1117. Not supported in graph mode.
  1118. Unlike Numpy, if the step length is a complex number with a real
  1119. component, the step length is handled as equivalent to
  1120. ``int(abs(step))``.
  1121. Returns:
  1122. Tensor or tuple of tensor, a meshgrid.
  1123. Raises:
  1124. TypeError: if slicing indices are not integers.
  1125. Supported Platforms:
  1126. ``Ascend`` ``GPU`` ``CPU``
  1127. Examples:
  1128. >>> from mindspore.numpy import mgrid
  1129. >>> output = mgrid[0:5, 0:5]
  1130. >>> print(output)
  1131. [[[0 0 0 0 0]
  1132. [1 1 1 1 1]
  1133. [2 2 2 2 2]
  1134. [3 3 3 3 3]
  1135. [4 4 4 4 4]]
  1136. [[0 1 2 3 4]
  1137. [0 1 2 3 4]
  1138. [0 1 2 3 4]
  1139. [0 1 2 3 4]
  1140. [0 1 2 3 4]]]
  1141. >>> output = mgrid[-1:1:5j]
  1142. >>> print(output)
  1143. [-1. -0.5 0. 0.5 1. ]
  1144. """
  1145. def __init__(self):
  1146. super(mGridClass, self).__init__(sparse=False)
  1147. class oGridClass(nd_grid):
  1148. """
  1149. ogrid is an :class:`nd_grid` instance with ``sparse=True``.
  1150. The dimension and number of the output arrays are equal to the number
  1151. of indexing dimensions. If the step length is not a complex number,
  1152. then the stop is not inclusive. However, if the step length is a complex
  1153. number (e.g. 5j), then the integer part of its magnitude is interpreted
  1154. as specifying the number of points to create between the start and
  1155. stop values, where the stop value is inclusive.
  1156. Note:
  1157. Not supported in graph mode.
  1158. Unlike Numpy, if the step length is a complex number with a real
  1159. component, the step length is handled as equivalent to
  1160. ``int(abs(step))``.
  1161. Raises:
  1162. TypeError: if slicing indices are not integers.
  1163. Supported Platforms:
  1164. ``Ascend`` ``GPU`` ``CPU``
  1165. Examples:
  1166. >>> from mindspore.numpy import ogrid
  1167. >>> output = ogrid[0:5,0:5]
  1168. >>> print(output)
  1169. [Tensor(shape=[5, 1], dtype=Int32, value=
  1170. [[0],
  1171. [1],
  1172. [2]
  1173. [3],
  1174. [4]]), Tensor(shape=[1, 5], dtype=Int32, value=
  1175. [[0, 1, 2, 3, 4]])]
  1176. >>> output = ogrid[-1:1:5j]
  1177. >>> print(output)
  1178. [-1. -0.5 0. 0.5 1. ]
  1179. """
  1180. def __init__(self):
  1181. super(oGridClass, self).__init__(sparse=True)
# Public singleton instances, analogous to numpy's `mgrid` and `ogrid`.
mgrid = mGridClass()
ogrid = oGridClass()
def diag(v, k=0):
    """
    Extracts a diagonal or construct a diagonal array.

    Args:
        v (Tensor): If `v` is a 2-D array, return a copy of its `k-th` diagonal.
            If `v` is a 1-D array, return a 2-D array with v on the `k-th` diagonal.
        k (int, optional): Diagonal in question. The default is 0. Use ``k>0`` for
            diagonals above the main diagonal, and ``k<0`` for diagonals below the
            main diagonal.

    Returns:
        Tensor, the extracted diagonal or constructed diagonal array.

    Raises:
        ValueError: if input is not 1-D or 2-D.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(9).reshape((3,3))
        >>> print(x)
        [[0 1 2]
         [3 4 5]
         [6 7 8]]
        >>> output = np.diag(x)
        >>> print(output)
        [0 4 8]
        >>> output = np.diag(x, k=1)
        >>> print(output)
        [1 5]
        >>> output = np.diag(x, k=-1)
        >>> print(output)
        [3 7]
    """
    ndim = F.rank(v)
    # 1-D input: build a 2-D matrix with `v` on the k-th diagonal.
    if ndim == 1:
        return diagflat(v, k=k)
    # 2-D input: extract the k-th diagonal.
    if ndim == 2:
        shape = F.shape(v)
        dtype = F.dtype(v)
        if _is_shape_empty(shape):
            return _empty(dtype, (0,))
        # Mask everything but the k-th diagonal, then sum each row to
        # collapse the masked matrix into the diagonal vector.
        e = eye(shape[0], shape[1], k, dtype)
        prod = F.tensor_mul(v, e)
        cast_type = dtype
        if not _check_is_float(dtype):
            # reduce sum only supports float types
            cast_type = mstype.float32
            prod = F.cast(prod, cast_type)
        res = F.reduce_sum(prod, 1)
        # Trim rows whose k-th diagonal entry falls outside the matrix.
        res = res[_max(0, -k): _min(shape[0], _max(0, shape[1] - k))]
        # Restore the original dtype if we had to cast for the reduction.
        if not _check_same_type(cast_type, dtype):
            res = F.cast(res, dtype)
        return res
    return _raise_value_error("Input must be 1- or 2-d.")
  1237. def diagflat(v, k=0):
  1238. """
  1239. Creates a two-dimensional array with the flattened input as a diagonal.
  1240. Note:
  1241. On GPU, the supported dtypes are np.float16, and np.float32.
  1242. Args:
  1243. v (Tensor): Input data, which is flattened and set as the `k-th` diagonal
  1244. of the output.
  1245. k (int, optional): Diagonal to set; 0, the default, corresponds to the
  1246. “main” diagonal, a positive (negative) `k` giving the number of the
  1247. diagonal above (below) the main.
  1248. Returns:
  1249. Tensor, The 2-D output array.
  1250. Raises:
  1251. TypeError: if the input is not a tensor.
  1252. Supported Platforms:
  1253. ``Ascend`` ``GPU`` ``CPU``
  1254. Examples:
  1255. >>> import mindspore.numpy as np
  1256. >>> output = np.diagflat(np.asarray([[1,2], [3,4]]))
  1257. >>> print(output)
  1258. [[1 0 0 0]
  1259. [0 2 0 0]
  1260. [0 0 3 0]
  1261. [0 0 0 4]]
  1262. >>> output = np.diagflat(np.asarray([1,2]), 1)
  1263. >>> print(output)
  1264. [[0 1 0]
  1265. [0 0 2]
  1266. [0 0 0]]
  1267. """
  1268. _check_input_tensor(v)
  1269. dtype = F.dtype(v)
  1270. k_abs = _abs(k)
  1271. if _is_shape_empty(F.shape(v)):
  1272. return zeros((k_abs, k_abs), dtype)
  1273. v = ravel(v)
  1274. size = F.shape(v)[0]
  1275. e = eye(size, size, 0, dtype)
  1276. res = F.tensor_mul(v, e)
  1277. if k != 0:
  1278. pad_y = zeros((size, k_abs), dtype)
  1279. pad_x = zeros((k_abs, size + k_abs), dtype)
  1280. if k < 0:
  1281. res = concatenate((res, pad_y), axis=1)
  1282. res = concatenate((pad_x, res), axis=0)
  1283. else:
  1284. res = concatenate((pad_y, res), axis=1)
  1285. res = concatenate((res, pad_x), axis=0)
  1286. return res
  1287. def diag_indices(n, ndim=2):
  1288. """
  1289. Returns the indices to access the main diagonal of an array.
  1290. This returns a tuple of indices that can be used to access the main
  1291. diagonal of an array a with ``a.ndim >= 2`` dimensions and shape `(n, n, …, n)`.
  1292. For ``a.ndim = 2`` this is the usual diagonal, for ``a.ndim > 2`` this is the set
  1293. of indices to access ``a[i, i, ..., i]`` for ``i = [0..n-1]``.
  1294. Args:
  1295. n (int): The size, along each dimension, of the arrays for which
  1296. the returned indices can be used.
  1297. ndim (int, optional): The number of dimensions.
  1298. Returns:
  1299. Tuple of Tensor.
  1300. Raises:
  1301. TypeError: if input are not integers.
  1302. Supported Platforms:
  1303. ``Ascend`` ``GPU`` ``CPU``
  1304. Examples:
  1305. >>> import mindspore.numpy as np
  1306. >>> output = np.diag_indices(5, 3)
  1307. >>> print(output)
  1308. (Tensor(shape=[5], dtype=Int32, value= [0, 1, 2, 3, 4]),
  1309. Tensor(shape=[5], dtype=Int32, value= [0, 1, 2, 3, 4]),
  1310. Tensor(shape=[5], dtype=Int32, value= [0, 1, 2, 3, 4]))
  1311. """
  1312. if not isinstance(n, int) or not isinstance(ndim, int):
  1313. _raise_type_error('input must be integers')
  1314. return _list_comprehensions(ndim, arange(start=0, stop=n), True)
  1315. def ix_(*args):
  1316. r"""
  1317. Constructs an open mesh from multiple sequences.
  1318. This function takes `N` 1-D sequences and returns `N` outputs with `N`
  1319. dimensions each, such that the shape is 1 in all but one dimension
  1320. and the dimension with the non-unit shape value cycles through all
  1321. N dimensions.
  1322. Using ix\_ one can quickly construct index arrays that will index
  1323. the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
  1324. ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
  1325. Note:
  1326. Boolean masks are not supported.
  1327. Args:
  1328. *args (Tensor): 1-D sequences.
  1329. Returns:
  1330. Tuple of Tensor, `N` arrays with `N` dimensions each, with `N` the
  1331. number of input sequences. Together these arrays form an open
  1332. mesh.
  1333. Raises:
  1334. TypeError: if the input is not a tensor.
  1335. Supported Platforms:
  1336. ``Ascend`` ``GPU`` ``CPU``
  1337. Examples:
  1338. >>> import mindspore.numpy as np
  1339. >>> ixgrid = np.ix_(np.array([0, 1]), np.array([2, 4]))
  1340. >>> print(ixgrid)
  1341. (Tensor(shape=[2, 1], dtype=Int32, value=
  1342. [[0],
  1343. [1]]), Tensor(shape=[1, 2], dtype=Int32, value=
  1344. [[2, 4]]))
  1345. """
  1346. # TODO boolean mask
  1347. _check_input_tensor(*args)
  1348. ndim = len(args)
  1349. res = ()
  1350. for i, arr in enumerate(args):
  1351. if F.rank(arr) != 1:
  1352. return _raise_value_error('Cross index must be 1 dimensional')
  1353. res += (F.reshape(arr, _expanded_shape(ndim, arr.size, i)),)
  1354. return res
  1355. def vander(x, N=None, increasing=False):
  1356. """
  1357. Generates a Vandermonde matrix.
  1358. The columns of the output matrix are powers of the input vector. The order of
  1359. the powers is determined by the increasing boolean argument. Specifically, when
  1360. increasing is `False`, the i-th output column is the input vector raised element-wise
  1361. to the power of :math:`N - i - 1`. Such a matrix with a geometric progression in each row
  1362. is named for Alexandre-Theophile Vandermonde.
  1363. Args:
  1364. x (Union[list, tuple, Tensor]): 1-D input array.
  1365. N (int, optional): Number of columns in the output. If N is not specified, a
  1366. square array is returned (``N = len(x)``).
  1367. increasing (bool, optional): Order of the powers of the columns. If True, the
  1368. powers increase from left to right, if False (the default) they are reversed.
  1369. Returns:
  1370. Vandermonde matrix. If `increasing` is `False`, the first column is :math:`x^{(N-1)}`,
  1371. the second :math:`x^{(N-2)}` and so forth. If `increasing` is `True`, the columns are
  1372. :math:`x^0, x^1, ..., x^{(N-1)}`.
  1373. Raises:
  1374. TypeError: If inputs have types not specified above.
  1375. ValueError: If `x` is not 1-D, or `N` < 0.
  1376. Supported Platforms:
  1377. ``Ascend`` ``GPU`` ``CPU``
  1378. Examples:
  1379. >>> import mindspore.numpy as np
  1380. >>> print(np.vander([1,2,3,4,5]))
  1381. [[ 1 1 1 1 1]
  1382. [ 16 8 4 2 1]
  1383. [ 81 27 9 3 1]
  1384. [256 64 16 4 1]
  1385. [625 125 25 5 1]]
  1386. """
  1387. if isinstance(x, (list, tuple)):
  1388. x = asarray_const(x)
  1389. elif not isinstance(x, Tensor):
  1390. _raise_type_error("Input x must be list, tuple or Tensor, but got ", x)
  1391. if x.ndim != 1:
  1392. _raise_value_error("Input x must be 1-D, but got dimension=", x.ndim)
  1393. N = N or x.size
  1394. if not isinstance(N, int):
  1395. _raise_type_error("Input N must be an integer.")
  1396. if N <= 0:
  1397. _raise_value_error("Input N must > 0.")
  1398. if not isinstance(increasing, bool):
  1399. _raise_type_error("increasing must be a bool.")
  1400. exponent = _iota(x.dtype, N, increasing)
  1401. x = F.expand_dims(x, 1)
  1402. exponent = F.expand_dims(exponent, 0)
  1403. return F.tensor_pow(x, exponent)
  1404. def indices(dimensions, dtype=mstype.int32, sparse=False):
  1405. """
  1406. Returns an array representing the indices of a grid.
  1407. Computes an array where the subarrays contain index values 0, 1, …
  1408. varying only along the corresponding axis.
  1409. Args:
  1410. dimensions (tuple or list of ints): The shape of the grid.
  1411. dtype (:class:`mindspore.dtype`, optional): Data type of the result.
  1412. sparse (boolean, optional): Defaults to False. Return a sparse
  1413. representation of the grid instead of a dense representation.
  1414. Returns:
  1415. Tensor or tuple of Tensor, If `sparse` is False, returns one array
  1416. of grid indices, ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
  1417. If sparse is True, returns a tuple of arrays, with
  1418. ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
  1419. ``dimensions[i]`` in the `ith` place
  1420. Raises:
  1421. TypeError: if input dimensions is not a tuple or list.
  1422. Supported Platforms:
  1423. ``Ascend`` ``GPU`` ``CPU``
  1424. Examples:
  1425. >>> import mindspore.numpy as np
  1426. >>> grid = np.indices((2, 3))
  1427. >>> print(grid)
  1428. [Tensor(shape=[2, 3], dtype=Int32, value=
  1429. [[0, 0, 0],
  1430. [1, 1, 1]]), Tensor(shape=[2, 3], dtype=Int32, value=
  1431. [[0, 1, 2],
  1432. [0, 1, 2]])]
  1433. """
  1434. if not isinstance(dimensions, (tuple, list)):
  1435. _raise_type_error('Shape of the grid must be tuple or list')
  1436. grids = ()
  1437. for d in dimensions:
  1438. grids += (arange(d, dtype=dtype),)
  1439. return meshgrid(*grids, sparse=sparse, indexing='ij')
  1440. def _check_window_size(x):
  1441. """Returns True if window size is greater than 1."""
  1442. if not isinstance(x, int):
  1443. _raise_type_error('the number fo points should be an int')
  1444. return x > 1
  1445. def bartlett(M):
  1446. """
  1447. Returns the Bartlett window.
  1448. The Bartlett window is very similar to a triangular window, except that the
  1449. end points are at zero. It is often used in signal processing for tapering a
  1450. signal, without generating too much ripple in the frequency domain.
  1451. Args:
  1452. M (int): Number of points in the output window. If zero or less, an empty
  1453. array is returned.
  1454. Returns:
  1455. Tensor, the triangular window, with the maximum value normalized to one
  1456. (the value one appears only if the number of samples is odd), with the
  1457. first and last samples equal to zero.
  1458. Raises:
  1459. TypeError: if `M` is not an int.
  1460. Supported Platforms:
  1461. ``Ascend`` ``GPU`` ``CPU``
  1462. Examples:
  1463. >>> import mindspore.numpy as np
  1464. >>> print(np.bartlett(12))
  1465. [0. 0.18181819 0.36363637 0.5454545 0.72727275 0.9090909
  1466. 0.9090909 0.72727275 0.5454545 0.36363637 0.18181819 0. ]
  1467. """
  1468. if not _check_window_size(M):
  1469. return ones(_max(0, M))
  1470. n = _iota(mstype.float32, M)
  1471. m_minus_one = _to_tensor(M - 1)
  1472. return _to_tensor(1) - F.absolute(_to_tensor(2)*n - m_minus_one)/m_minus_one
  1473. def blackman(M):
  1474. """
  1475. Returns the Blackman window.
  1476. The Blackman window is a taper formed by using the first three terms of a
  1477. summation of cosines. It was designed to have close to the minimal leakage
  1478. possible. It is close to optimal, only slightly worse than a Kaiser window.
  1479. Args:
  1480. M (int): Number of points in the output window. If zero or less, an empty
  1481. array is returned.
  1482. Returns:
  1483. Tensor, the window, with the maximum value normalized to one (the value
  1484. one appears only if the number of samples is odd).
  1485. Raises:
  1486. TypeError: if `M` is not an int.
  1487. Supported Platforms:
  1488. ``Ascend`` ``GPU`` ``CPU``
  1489. Examples:
  1490. >>> import mindspore.numpy as np
  1491. >>> print(np.blackman(12))
  1492. [-1.4901161e-08 3.2606430e-02 1.5990365e-01 4.1439798e-01
  1493. 7.3604518e-01 9.6704674e-01 9.6704674e-01 7.3604518e-01
  1494. 4.1439798e-01 1.5990365e-01 3.2606430e-02 -1.4901161e-08]
  1495. """
  1496. if not _check_window_size(M):
  1497. return ones(_max(0, M))
  1498. n_doubled = arange(1 - M, M, 2, dtype=mstype.float32)
  1499. return (_to_tensor(0.42) + _to_tensor(0.5)*F.cos(_to_tensor(pi/(M - 1))*n_doubled) +
  1500. _to_tensor(0.08)*F.cos(_to_tensor(2*pi/(M - 1))*n_doubled))
  1501. def hamming(M):
  1502. """
  1503. Returns the Hamming window.
  1504. The Hamming window is a taper formed by using a weighted cosine.
  1505. Args:
  1506. M (int): Number of points in the output window. If zero or less, an empty
  1507. array is returned.
  1508. Returns:
  1509. Tensor, the window, with the maximum value normalized to one (the value
  1510. one appears only if the number of samples is odd).
  1511. Raises:
  1512. TypeError: if `M` is not an int.
  1513. Supported Platforms:
  1514. ``Ascend`` ``GPU`` ``CPU``
  1515. Examples:
  1516. >>> import mindspore.numpy as np
  1517. >>> print(np.hamming(12))
  1518. [0.08000001 0.15302339 0.34890914 0.6054648 0.841236 0.9813669
  1519. 0.9813668 0.8412359 0.6054647 0.34890908 0.15302327 0.08000001]
  1520. """
  1521. if not _check_window_size(M):
  1522. return ones(_max(0, M))
  1523. n = _iota(mstype.float32, M)
  1524. return _to_tensor(0.54) - _to_tensor(0.46)*F.cos(_to_tensor(2*pi/(M - 1))*n)
  1525. def hanning(M):
  1526. """
  1527. Returns the Hanning window.
  1528. The Hanning window is a taper formed by using a weighted cosine.
  1529. Args:
  1530. M (int): Number of points in the output window. If zero or less, an empty
  1531. array is returned.
  1532. Returns:
  1533. Tensor, the window, with the maximum value normalized to one (the value
  1534. one appears only if the number of samples is odd).
  1535. Raises:
  1536. TypeError: if `M` is not an int.
  1537. Supported Platforms:
  1538. ``Ascend`` ``GPU`` ``CPU``
  1539. Examples:
  1540. >>> import mindspore.numpy as np
  1541. >>> print(np.hanning(12))
  1542. [0. 0.07937324 0.29229254 0.5711574 0.8274304 0.9797465
  1543. 0.97974646 0.82743025 0.5711573 0.29229245 0.07937312 0. ]
  1544. """
  1545. if not _check_window_size(M):
  1546. return ones(_max(0, M))
  1547. n = _iota(mstype.float32, M)
  1548. return _to_tensor(0.5) - _to_tensor(0.5)*F.cos(_to_tensor(2*pi/(M - 1))*n)
  1549. @constexpr
  1550. def tri_indices(n, k=0, m=None, upper=True):
  1551. """Returns triu/tril indices in o(nm) time."""
  1552. if not isinstance(n, (int, float, bool)):
  1553. raise TypeError("Input n must be a number.")
  1554. if not isinstance(k, (int, float, bool)):
  1555. raise TypeError("Input k must be a number.")
  1556. if m is None:
  1557. m = n
  1558. elif not isinstance(m, (int, float, bool)):
  1559. raise TypeError("Input m must be a number.")
  1560. if upper:
  1561. compare = operator.ge
  1562. else:
  1563. compare = operator.le
  1564. x_coordinate = []
  1565. y_coordinate = []
  1566. # math.ceil is used to match numpy's behaviour
  1567. for i in range(math.ceil(n)):
  1568. curr_limit = i + k
  1569. for j in range(math.ceil(m)):
  1570. if compare(j, curr_limit):
  1571. x_coordinate.append(i)
  1572. y_coordinate.append(j)
  1573. return asarray_const(x_coordinate), asarray_const(y_coordinate)
  1574. def triu_indices(n, k=0, m=None):
  1575. """
  1576. Returns the indices for the upper-triangle of an (n, m) array.
  1577. Args:
  1578. n (int): The size of the arrays for which the returned indices will be valid.
  1579. k (int, optional): Diagonal offset.
  1580. m (int, optional): The column dimension of the arrays for which the returned
  1581. arrays will be valid. By default `m` is taken equal to `n`.
  1582. Returns:
  1583. The indices for the triangle. The returned tuple contains two tensors, each
  1584. with the indices along one dimension of the tensor.
  1585. Raises:
  1586. TypeError: if `n`, `k`, `m` are not numbers.
  1587. Supported Platforms:
  1588. ``Ascend`` ``GPU`` ``CPU``
  1589. Examples:
  1590. >>> import mindspore.numpy as np
  1591. >>> print(np.triu_indices(3))
  1592. (Tensor(shape=[6], dtype=Int32, value= [0, 0, 0, 1, 1, 2]),
  1593. Tensor(shape=[6], dtype=Int32, value= [0, 1, 2, 1, 2, 2]))
  1594. """
  1595. return tri_indices(n, k, m, True)
  1596. def tril_indices(n, k=0, m=None):
  1597. """
  1598. Returns the indices for the lower-triangle of an (n, m) array.
  1599. Args:
  1600. n (int): The size of the arrays for which the returned indices will be valid.
  1601. k (int, optional): Diagonal offset.
  1602. m (int, optional): The column dimension of the arrays for which the returned
  1603. arrays will be valid. By default `m` is taken equal to `n`.
  1604. Returns:
  1605. The indices for the triangle. The returned tuple contains two tensors, each
  1606. with the indices along one dimension of the tensor.
  1607. Raises:
  1608. TypeError: if `n`, `k`, `m` are not numbers.
  1609. Supported Platforms:
  1610. ``Ascend`` ``GPU`` ``CPU``
  1611. Examples:
  1612. >>> import mindspore.numpy as np
  1613. >>> print(np.tril_indices(3))
  1614. (Tensor(shape=[6], dtype=Int32, value= [0, 1, 1, 2, 2, 2]),
  1615. Tensor(shape=[6], dtype=Int32, value= [0, 0, 1, 0, 1, 2]))
  1616. """
  1617. return tri_indices(n, k, m, False)
  1618. def triu_indices_from(arr, k=0):
  1619. """
  1620. Returns the indices for the upper-triangle of `arr`.
  1621. Args:
  1622. arr (Union[Tensor, list, tuple]): 2-dimensional array.
  1623. k (int, optional): Diagonal offset.
  1624. Returns:
  1625. triu_indices_from, tuple of 2 tensor, shape(N)
  1626. Indices for the upper-triangle of `arr`.
  1627. Raises:
  1628. TypeError: if `arr` cannot be converted to tensor, or `k` is not a number.
  1629. ValueError: if `arr` cannot be converted to a 2-dimensional tensor.
  1630. Supported Platforms:
  1631. ``Ascend`` ``GPU`` ``CPU``
  1632. Examples:
  1633. >>> import mindspore.numpy as np
  1634. >>> tensor = np.ones((3,3))
  1635. >>> print(np.triu_indices_from(tensor))
  1636. (Tensor(shape=[6], dtype=Int32, value= [0, 0, 0, 1, 1, 2]),
  1637. Tensor(shape=[6], dtype=Int32, value= [0, 1, 2, 1, 2, 2]))
  1638. """
  1639. arr = asarray(arr)
  1640. if arr.ndim != 2:
  1641. _raise_value_error("input array must be 2-d")
  1642. return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
  1643. def tril_indices_from(arr, k=0):
  1644. """
  1645. Returns the indices for the lower-triangle of `arr`.
  1646. Args:
  1647. arr (Union[Tensor, list, tuple]): 2-dimensional array.
  1648. k (int, optional): Diagonal offset.
  1649. Returns:
  1650. triu_indices_from, tuple of 2 tensor, shape(N)
  1651. Indices for the upper-triangle of `arr`.
  1652. Raises:
  1653. TypeError: if `arr` cannot be converted to tensor, or `k` is not a number.
  1654. ValueError: if `arr` cannot be converted to a 2-dimensional tensor.
  1655. Supported Platforms:
  1656. ``Ascend`` ``GPU`` ``CPU``
  1657. Examples:
  1658. >>> import mindspore.numpy as np
  1659. >>> tensor = np.ones((3,3))
  1660. >>> print(np.tril_indices_from(tensor))
  1661. (Tensor(shape=[6], dtype=Int32, value= [0, 1, 1, 2, 2, 2]),
  1662. Tensor(shape=[6], dtype=Int32, value= [0, 0, 1, 0, 1, 2]))
  1663. """
  1664. arr = asarray(arr)
  1665. if arr.ndim != 2:
  1666. _raise_value_error("input array must be 2-d")
  1667. return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
  1668. def histogram_bin_edges(a, bins=10, range=None, weights=None): # pylint: disable=redefined-builtin
  1669. """
  1670. Function to calculate only the edges of the bins used by the histogram function.
  1671. Note:
  1672. String values for `bins` is not supported.
  1673. Args:
  1674. a (Union[int, float, bool, list, tuple, Tensor]): Input data. The histogram
  1675. is computed over the flattened array.
  1676. bins ((Union[int, tuple, list, Tensor])): If `bins` is an int, it defines the number
  1677. of equal-width bins in the given range (10, by default). If `bins` is a
  1678. sequence, it defines the bin edges, including the rightmost edge,
  1679. allowing for non-uniform bin widths.
  1680. range((float, float), optional): The lower and upper range of the bins. If
  1681. not provided, `range` is simply ``(a.min(), a.max())``. Values outside
  1682. the range are ignored. The first element of the range must be less than
  1683. or equal to the second.
  1684. Returns:
  1685. Tensor, the edges to pass into `histogram`.
  1686. Supported Platforms:
  1687. ``Ascend`` ``GPU`` ``CPU``
  1688. Raises:
  1689. TypeError: if `bins` is an array and not one-dimensional.
  1690. Examples:
  1691. >>> import mindspore.numpy as np
  1692. >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
  1693. >>> print(np.histogram_bin_edges(arr, bins=2))
  1694. [0. 2.5 5. ]
  1695. """
  1696. if isinstance(bins, (tuple, list, Tensor)):
  1697. bins = _to_tensor(bins)
  1698. if F.rank(bins) != 1:
  1699. _raise_value_error('`bins` must be 1d, when an array')
  1700. return bins
  1701. if isinstance(bins, str):
  1702. # linspace does not support Tensor for num
  1703. _raise_unimplemented_error('string value for `bins` not implemented')
  1704. a = _to_tensor(a).ravel().astype(mstype.float32)
  1705. if range is None:
  1706. start = F.reduce_min(a)
  1707. end = F.reduce_max(a)
  1708. else:
  1709. start, end = _to_tensor(*range)
  1710. no_range = (end - start) == 0
  1711. start = where(no_range, start - 0.5, start)
  1712. end = where(no_range, end + 0.5, end)
  1713. return linspace(start, end, bins + 1)
  1714. def _pad_empty(arr, pad_width):
  1715. """
  1716. pads the array with constant values, used in mode: "empty"
  1717. """
  1718. dtype = arr.dtype
  1719. for i in range(arr.ndim):
  1720. shape = arr.shape
  1721. pad_before = ()
  1722. pad_after = ()
  1723. # To avoid any memory issues, we don't make tensor with 0s in their shapes
  1724. if pad_width[i][0] > 0:
  1725. pad_before += (empty(_tuple_setitem(shape, i, pad_width[i][0]), dtype=dtype),)
  1726. if pad_width[i][1] > 0:
  1727. pad_after += (empty(_tuple_setitem(shape, i, pad_width[i][1]), dtype=dtype),)
  1728. tensor_with_pad = pad_before + (arr,) + pad_after
  1729. arr = concatenate(tensor_with_pad, axis=i)
  1730. return arr
  1731. def _pad_constant(arr, pad_width, value):
  1732. """
  1733. pads the array with constant values, used in mode: "constant"
  1734. """
  1735. dtype = arr.dtype
  1736. for i in range(arr.ndim):
  1737. shape = arr.shape
  1738. pad_before = ()
  1739. pad_after = ()
  1740. # To avoid any memory issues, we don't make tensor with 0s in their shapes
  1741. if pad_width[i][0] > 0:
  1742. pad_before += (full(_tuple_setitem(shape, i, pad_width[i][0]), value[i][0], dtype=dtype),)
  1743. if pad_width[i][1] > 0:
  1744. pad_after += (full(_tuple_setitem(shape, i, pad_width[i][1]), value[i][1], dtype=dtype),)
  1745. tensor_with_pad = pad_before + (arr,) + pad_after
  1746. arr = concatenate(tensor_with_pad, axis=i)
  1747. return arr
  1748. def _pad_statistic(arr, pad_width, stat_length, stat_op):
  1749. """
  1750. pads the array with values calculated along the given axis, used in mode: "maximum",
  1751. "minimum", "mean"
  1752. """
  1753. ndim = arr.ndim
  1754. shape = arr.shape
  1755. if stat_length is None:
  1756. stat_length = _make_stat_length(shape)
  1757. else:
  1758. stat_length = _convert_pad_to_nd(stat_length, ndim)
  1759. stat_length = _limit_stat_length(stat_length, shape)
  1760. for i in range(ndim):
  1761. pad_before = stat_op(_slice_along_axis(arr, i, 0, stat_length[i][0]), i)
  1762. pad_before = (F.tile(pad_before, _tuple_setitem((1,)*ndim, i, pad_width[i][0])),)
  1763. pad_after = stat_op(_slice_along_axis(arr, i, shape[i]-stat_length[i][1], shape[i]), i)
  1764. pad_after = (F.tile(pad_after, _tuple_setitem((1,)*ndim, i, pad_width[i][1])),)
  1765. tensor_with_pad = pad_before + (arr,) + pad_after
  1766. arr = concatenate(tensor_with_pad, axis=i)
  1767. return arr
  1768. def _pad_edge(arr, pad_width):
  1769. """pad_edge is equivalent to pad_statistic with stat_lenght=1, used in mode:"edge"."""
  1770. def identity_op(arr, axis):
  1771. return arr
  1772. return _pad_statistic(arr, pad_width, 1, identity_op)
  1773. def _pad_wrap(arr, pad_width):
  1774. """The behaviour of wrap mode is consistent with jax.numpy, used in mode:"wrap"."""
  1775. ndim = arr.ndim
  1776. shape = arr.shape
  1777. for i in range(ndim):
  1778. padsize_before = pad_width[i][0] % shape[i]
  1779. padsize_after = pad_width[i][1] % shape[i]
  1780. total_repeats = pad_width[i][0] // shape[i] + 1 + pad_width[i][1] // shape[i]
  1781. tensor_with_pad = ()
  1782. # To avoid any memory issues, we don't make tensor with 0s in their shapes
  1783. if padsize_before > 0:
  1784. tensor_with_pad += (_slice_along_axis(arr, i, shape[i]-padsize_before, shape[i]),)
  1785. tensor_with_pad += (F.tile(arr, _tuple_setitem((1,)*ndim, i, total_repeats)),)
  1786. if padsize_after > 0:
  1787. tensor_with_pad += (_slice_along_axis(arr, i, 0, padsize_after),)
  1788. arr = concatenate(tensor_with_pad, axis=i)
  1789. return arr
  1790. def _pad_linear(arr, pad_width, end_values):
  1791. """Pads the arr with linear range values, used in mode: "linear_ramp"."""
  1792. ndim = arr.ndim
  1793. shape = arr.shape
  1794. dtype = arr.dtype
  1795. end_values = _convert_pad_to_nd(end_values, ndim)
  1796. for i in range(ndim):
  1797. # shape [..., 1, ...]
  1798. left_value = _slice_along_axis(arr, i, 0, 1)
  1799. right_value = _slice_along_axis(arr, i, shape[i]-1, shape[i])
  1800. pad_before = ()
  1801. pad_after = ()
  1802. if pad_width[i][0] > 0:
  1803. # shape [..., pad_width[i][0], ...]
  1804. pad_before = (linspace(end_values[i][0], left_value, num=pad_width[i][0],
  1805. endpoint=False, dtype=dtype, axis=i).squeeze(i+1),)
  1806. if pad_width[i][1] > 0:
  1807. # shape [..., pad_width[i][1], ...]
  1808. pad_after = linspace(right_value, end_values[i][1], num=pad_width[i][1]+1,
  1809. endpoint=True, dtype=dtype, axis=i).squeeze(i+1)
  1810. pad_after = (_slice_along_axis(pad_after, i, 1, pad_width[i][1]+1),)
  1811. tensor_with_pad = pad_before + (arr,) + pad_after
  1812. arr = concatenate(tensor_with_pad, axis=i)
  1813. return arr
  1814. def _pad_symmetric(arr, pad_width, reflect_type):
  1815. """pad the array with symmetric paddings"""
  1816. for i in range(arr.ndim):
  1817. array_length = arr.shape[i]
  1818. has_pad_before = (pad_width[i][0] > 0)
  1819. has_pad_after = (pad_width[i][1] > 0)
  1820. edge_before = _slice_along_axis(arr, i, 0, 1)
  1821. edge_end = _slice_along_axis(arr, i, array_length-1, array_length)
  1822. times_to_pad_before = pad_width[i][0] // array_length + 1
  1823. additional_pad_before = pad_width[i][0] % array_length
  1824. times_to_pad_after = pad_width[i][1] // array_length + 1
  1825. additional_pad_after = pad_width[i][1] % array_length
  1826. curr_pad = None
  1827. if has_pad_before:
  1828. # Deal with paddings before the original array
  1829. for times in range(times_to_pad_before):
  1830. if times < times_to_pad_before - 1:
  1831. endpoint = array_length
  1832. else:
  1833. endpoint = additional_pad_before
  1834. if endpoint != 0:
  1835. curr_pad = _slice_along_axis(arr, i, 0, endpoint)
  1836. curr_pad = flip(curr_pad, axis=i)
  1837. if reflect_type == "odd":
  1838. curr_pad = 2 * edge_before - curr_pad
  1839. arr = P.Concat(i)((curr_pad, arr))
  1840. edge_before = _slice_along_axis(arr, i, 0, 1)
  1841. if has_pad_after:
  1842. # Deal with paddings after the original array
  1843. for times in range(times_to_pad_after):
  1844. if times < times_to_pad_after - 1:
  1845. startpoint = arr.shape[i] - array_length
  1846. else:
  1847. startpoint = arr.shape[i] - additional_pad_after
  1848. if startpoint != arr.shape[i]:
  1849. curr_pad = _slice_along_axis(arr, i, startpoint, arr.shape[i])
  1850. curr_pad = flip(curr_pad, axis=i)
  1851. if reflect_type == "odd":
  1852. curr_pad = 2 * edge_end - curr_pad
  1853. arr = P.Concat(i)((arr, curr_pad))
  1854. edge_end = _slice_along_axis(arr, i, arr.shape[i]-1, arr.shape[i])
  1855. return arr
  1856. def _pad_reflect(arr, pad_width, reflect_type):
  1857. """
  1858. pad the array with reflect paddings, this is very similar to symmetric paddings,
  1859. but differs at how edges are selected.
  1860. """
  1861. # pylint: disable=too-many-nested-blocks
  1862. for i in range(arr.ndim):
  1863. array_length = arr.shape[i]
  1864. if array_length == 1:
  1865. total_repeats = pad_width[i][0] + pad_width[i][1] + 1
  1866. arr = F.tile(arr, _tuple_setitem((1,)*arr.ndim, i, total_repeats))
  1867. else:
  1868. has_pad_before = (pad_width[i][0] > 0)
  1869. has_pad_after = (pad_width[i][1] > 0)
  1870. edge_before = _slice_along_axis(arr, i, 0, 1)
  1871. edge_end = _slice_along_axis(arr, i, array_length-1, array_length)
  1872. pad_size = array_length - 1
  1873. times_to_pad_before = pad_width[i][0] // pad_size + 1
  1874. additional_pad_before = pad_width[i][0] % pad_size
  1875. times_to_pad_after = pad_width[i][1] // pad_size + 1
  1876. additional_pad_after = pad_width[i][1] % pad_size
  1877. curr_pad = None
  1878. if has_pad_before:
  1879. # Deal with paddings before the original array
  1880. for times in range(times_to_pad_before):
  1881. if times < times_to_pad_before - 1:
  1882. endpoint = array_length
  1883. else:
  1884. endpoint = additional_pad_before + 1
  1885. if endpoint != 1:
  1886. curr_pad = _slice_along_axis(arr, i, 1, endpoint)
  1887. curr_pad = flip(curr_pad, axis=i)
  1888. if reflect_type == "odd":
  1889. curr_pad = 2 * edge_before - curr_pad
  1890. arr = P.Concat(i)((curr_pad, arr))
  1891. edge_before = _slice_along_axis(arr, i, 0, 1)
  1892. if has_pad_after:
  1893. # Deal with paddings after the original array
  1894. for times in range(times_to_pad_after):
  1895. if times < times_to_pad_after - 1:
  1896. startpoint = arr.shape[i] - array_length
  1897. else:
  1898. startpoint = arr.shape[i] - additional_pad_after - 1
  1899. if startpoint != arr.shape[i]-1:
  1900. curr_pad = _slice_along_axis(arr, i, startpoint, arr.shape[i]-1)
  1901. curr_pad = flip(curr_pad, axis=i)
  1902. if reflect_type == "odd":
  1903. curr_pad = 2 * edge_end - curr_pad
  1904. arr = P.Concat(i)((arr, curr_pad))
  1905. edge_end = _slice_along_axis(arr, i, arr.shape[i]-1, arr.shape[i])
  1906. return arr
  1907. def _pad_func(arr, pad_width, func, **kwargs):
  1908. """applies padding function over different axis."""
  1909. # first creates a padded array with fixed length.
  1910. arr_dim = arr.ndim
  1911. pad_width = _convert_pad_to_nd(pad_width, arr_dim)
  1912. arr = _pad_empty(arr, pad_width)
  1913. for i in range(arr_dim):
  1914. # function signature: padding_func(tensor, iaxis_pad_width, iaxis, kwargs)
  1915. arr = apply_along_axis(func, i, arr, pad_width[i], i, kwargs)
  1916. return arr
  1917. @constexpr
  1918. def _make_stat_length(shape):
  1919. """converts the stat_length values."""
  1920. return tuple((shape[i], shape[i]) for i, _ in enumerate(shape))
  1921. @constexpr
  1922. def _limit_stat_length(stat_length, shape):
  1923. """limits the stat_length to current array length along given dimension."""
  1924. return tuple((min(stat_pair[0], shape[i]), min(stat_pair[1], shape[i])) for i, stat_pair in enumerate(stat_length))
  1925. @constexpr
  1926. def _convert_pad_to_nd(pad_values, ndim):
  1927. """broadcasts the pad_values to (ndim * 2)"""
  1928. if not isinstance(pad_values, (int, list, tuple, Tensor)):
  1929. raise TypeError(
  1930. "pad_width, stat_length, constant_values or end_values should only be int, list, tuple or tensor")
  1931. pad_tensor = _to_tensor(pad_values)
  1932. pad_shape = pad_tensor.shape
  1933. if not pad_shape:
  1934. pad_values = tuple((((pad_values,) * 2) for i in range(ndim)))
  1935. elif pad_shape == (1,):
  1936. pad_values = tuple((tuple(pad_values) * 2) for i in range(ndim))
  1937. elif pad_shape == (2,):
  1938. pad_values = tuple(tuple(pad_values) for i in range(ndim))
  1939. elif pad_shape == (1, 2):
  1940. pad_values = tuple(tuple(pad_values[0]) for i in range(ndim))
  1941. elif pad_shape == (ndim, 2):
  1942. pad_values = tuple(tuple(pad_pair) for pad_pair in pad_values)
  1943. else:
  1944. raise ValueError(f"input values must be able to broadcast to {(ndim, 2)}")
  1945. return pad_values
  1946. def pad(arr, pad_width, mode="constant", stat_length=None, constant_values=0,
  1947. end_values=0, reflect_type="even", **kwargs):
  1948. """
  1949. Pads an array.
  1950. Note:
  1951. Currently, `median` mode is not supported. `reflect` and `symmetric` mode
  1952. only supports GPU backend.
  1953. Args:
  1954. arr (Union[list, tuple, Tensor]): The array to pad.
  1955. pad_width (Union[int, tuple, list]): Number of values padded to the edges of
  1956. each axis. :class:`((before_1, after_1), ... (before_N, after_N))` creates
  1957. unique pad widths for each axis. :class:`((before, after),)` yields same
  1958. before and after pad for each axis. :class:`(pad,)` or int is a shortcut
  1959. for :class:`before = after = pad width` for all axes.
  1960. mode (string, optional):
  1961. One of the following string values:
  1962. - constant (default): Pads with a constant value.
  1963. - edge: Pads with the edge values of `arr`.
  1964. - linear_ramp: Pads with the linear ramp between end_value and the `arr` edge value.
  1965. - maximum: Pads with the maximum value of all or part of the vector along each axis.
  1966. - mean: Pads with the mean value of all or part of the vector along each axis.
  1967. - median: Pads with the median value of all or part of the vector along each axis.
  1968. - minimum: Pads with the minimum value of all or part of the vector along each axis.
  1969. - reflect: Pads with the reflection of the vector mirrored on the first
  1970. and last values of the vector along each axis.
  1971. - symmetric: Pads with the reflection of the vector mirrored along the edge
  1972. of the `arr`.
  1973. - wrap: Pads with the wrap of the vector along the axis. The first values
  1974. are used to pad the end and the end values are used to pad the beginning.
  1975. - empty: Pads with undefined values.
  1976. - <function>: The padding function, if used, should modify and return a new 1-d tensor.
  1977. It has the following signature: :class:`padding_func(tensor, iaxis_pad_width, iaxis, kwargs)`
  1978. stat_length (Union[tuple, list, int], optional): Used in \'maximum\', \'mean\',
  1979. \'median\', and \'minimum\'. Number of values at edge of each axis used
  1980. to calculate the statistic value. :class:`((before_1, after_1), ... (before_N, after_N))`
  1981. creates unique statistic lengths for each axis. :class:`((before, after),)`
  1982. yields same before and after statistic lengths for each axis. :class:`(stat_length,)`
  1983. or int is a shortcut for :class:`before = after = statistic length` for all
  1984. axes. Default is :class:`None`, to use the entire axis.
  1985. constant_values (Union[tuple, list, int], optional):
  1986. Used in :class:`constant mode`. The values to set the padded values for each
  1987. axis. :class:`((before_1, after_1), ... (before_N, after_N))` creates unique pad
  1988. constants for each axis. :class:`((before, after),)` yields same before and
  1989. after constants for each axis. :class:`(constant,)` or :class:`constant` is
  1990. a shortcut for :class:`before = after = constant` for all axes. Default is 0.
  1991. end_values (Union[tuple, list, int], optional): Used in 'linear_ramp'. The values
  1992. used for the ending value of the linear_ramp and that will form the edge of
  1993. the padded `arr`. :class:`((before_1, after_1), ... (before_N, after_N))`
  1994. unique end values for each axis. :class:`((before, after),)` yields same before
  1995. and after end values for each axis. :class:`(constant,)` or :class:`constant`
  1996. is a shortcut for :class:`before = after = constant` for all axes. Default is 0.
  1997. reflect_type(string, optional) can choose between \'even\' and \'odd\'. Used in
  1998. \'reflect\', and \'symmetric\'. The \'even\' style is the default with an
  1999. unaltered reflection around the edge value. For the \'odd\' style, the extended
  2000. part of the `arr` is created by subtracting the reflected values from two times
  2001. the edge value.
  2002. Returns:
  2003. Padded tensor of rank equal to `arr` with shape increased according to `pad_width`.
  2004. Raises:
  2005. TypeError: if `arr`, `pad_width`, `stat_length`, `constant_values` or `end_values`
  2006. have types not specified above.
  2007. ValueError: if `mode` cannot be recognized, or if `pad_width`, `stat_length`,
  2008. `constant_values`, `end_values` cannot broadcast to :class:`(arr.ndim, 2)`,
  2009. or if keyword arguments got unexpected inputs.
  2010. NotImplementedError: if mode is function or '/median'/.
  2011. Supported Platforms:
  2012. ``Ascend`` ``GPU`` ``CPU``
  2013. Examples:
  2014. >>> import mindspore.numpy as np
  2015. >>> tensor = np.array([1., 2., 3., 4., 5.])
  2016. >>> print(np.pad(tensor, (3, 4)))
  2017. [0. 0. 0. 1. 2. 3. 4. 5. 0. 0. 0. 0.]
  2018. >>> print(np.pad(tensor, (3, 4), mode="wrap"))
  2019. [3. 4. 5. 1. 2. 3. 4. 5. 1. 2. 3. 4.]
  2020. >>> >>> print(np.pad(tensor, (3, 4), mode="linear_ramp", end_values=(10, 10)))
  2021. [10. 7. 4. 1. 2. 3. 4. 5. 6.25 7.5 8.75 10. ]
  2022. """
  2023. arr = _to_tensor(arr)
  2024. if arr.ndim == 0:
  2025. return arr
  2026. pad_width = _convert_pad_to_nd(pad_width, arr.ndim)
  2027. stat_func = {"maximum": _reduce_max_keepdims,
  2028. "minimum": _reduce_min_keepdims,
  2029. "mean": _reduce_mean_keepdims,
  2030. "median": "not implemented"}
  2031. if mode not in ("constant", "maximum", "minimum", "mean", "median", "edge",
  2032. "wrap", "linear_ramp", "symmetric", "reflect", "empty") and \
  2033. not _callable(arr, mode):
  2034. _raise_value_error("Input mode not supported.")
  2035. if mode == "constant":
  2036. constant_values = _convert_pad_to_nd(constant_values, arr.ndim)
  2037. return _pad_constant(arr, pad_width, constant_values)
  2038. if mode in ("maximum", "minimum", "mean", "median"):
  2039. # TODO: support median mode once P.Sort/P.Median is supported on GPU/CPU
  2040. if mode == "median":
  2041. _raise_unimplemented_error("median mode is not supported yet")
  2042. return _pad_statistic(arr, pad_width, stat_length, stat_func[mode])
  2043. if mode == "edge":
  2044. return _pad_edge(arr, pad_width)
  2045. if mode == "wrap":
  2046. return _pad_wrap(arr, pad_width)
  2047. if mode == "linear_ramp":
  2048. return _pad_linear(arr, pad_width, end_values)
  2049. if mode == "symmetric":
  2050. return _pad_symmetric(arr, pad_width, reflect_type)
  2051. if mode == "reflect":
  2052. return _pad_reflect(arr, pad_width, reflect_type)
  2053. if mode == 'empty':
  2054. return _pad_empty(arr, pad_width)
  2055. return _pad_func(arr, pad_width, mode, **kwargs)