You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

test_array_ops.py 50 kB

4 years ago
5 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """unit tests for numpy array operations"""
  16. import functools
  17. import pytest
  18. import numpy as onp
  19. import mindspore.numpy as mnp
  20. from mindspore import context
  21. from mindspore.nn import Cell
  22. from .utils import rand_int, run_non_kw_test, check_all_results, match_array, \
  23. rand_bool, match_res, run_multi_test, to_tensor, match_all_arrays
# Run every test in this file in PyNative (eager) execution mode.
context.set_context(mode=context.PYNATIVE_MODE)
  25. class Cases():
  26. def __init__(self):
  27. self.all_shapes = [
  28. 1, 2, (1,), (2,), (1, 2, 3), [1], [2], [1, 2, 3]
  29. ]
  30. self.onp_dtypes = [onp.int32, 'int32', int,
  31. onp.float32, 'float32', float,
  32. onp.uint32, 'uint32',
  33. onp.bool_, 'bool', bool]
  34. self.mnp_dtypes = [mnp.int32, 'int32', int,
  35. mnp.float32, 'float32', float,
  36. mnp.uint32, 'uint32',
  37. mnp.bool_, 'bool', bool]
  38. self.array_sets = [1, 1.1, True, [1, 0, True], [1, 1.0, 2], (1,),
  39. [(1, 2, 3), (4, 5, 6)], onp.random.random( # pylint: disable=no-member
  40. (100, 100)).astype(onp.float32),
  41. onp.random.random((100, 100)).astype(onp.bool)]
  42. self.arrs = [
  43. rand_int(2),
  44. rand_int(2, 3),
  45. rand_int(2, 3, 4),
  46. rand_int(2, 3, 4, 5),
  47. ]
  48. # scalars expanded across the 0th dimension
  49. self.scalars = [
  50. rand_int(),
  51. rand_int(1),
  52. rand_int(1, 1),
  53. rand_int(1, 1, 1),
  54. ]
  55. # arrays of the same size expanded across the 0th dimension
  56. self.expanded_arrs = [
  57. rand_int(2, 3),
  58. rand_int(1, 2, 3),
  59. rand_int(1, 1, 2, 3),
  60. rand_int(1, 1, 1, 2, 3),
  61. ]
  62. # arrays with dimensions of size 1
  63. self.nested_arrs = [
  64. rand_int(1),
  65. rand_int(1, 2),
  66. rand_int(3, 1, 8),
  67. rand_int(1, 3, 9, 1),
  68. ]
  69. # arrays which can be broadcast
  70. self.broadcastables = [
  71. rand_int(5),
  72. rand_int(6, 1),
  73. rand_int(7, 1, 5),
  74. rand_int(8, 1, 6, 1)
  75. ]
  76. # boolean arrays which can be broadcast
  77. self.bool_broadcastables = [
  78. rand_bool(),
  79. rand_bool(1),
  80. rand_bool(5),
  81. rand_bool(6, 1),
  82. rand_bool(7, 1, 5),
  83. rand_bool(8, 1, 6, 1),
  84. ]
  85. self.mnp_prototypes = [
  86. mnp.ones((2, 3, 4)),
  87. [mnp.ones(3), (1, 2, 3), onp.ones(3), [4, 5, 6]],
  88. ([(1, 2), mnp.ones(2)], (onp.ones(2), [3, 4])),
  89. ]
  90. self.onp_prototypes = [
  91. onp.ones((2, 3, 4)),
  92. [onp.ones(3), (1, 2, 3), onp.ones(3), [4, 5, 6]],
  93. ([(1, 2), onp.ones(2)], (onp.ones(2), [3, 4])),
  94. ]
  95. # Test np.transpose and np.ndarray.transpose
  96. def mnp_transpose(input_tensor):
  97. a = mnp.transpose(input_tensor, (0, 2, 1))
  98. b = mnp.transpose(input_tensor, [2, 1, 0])
  99. c = mnp.transpose(input_tensor, (1, 0, 2))
  100. d = mnp.transpose(input_tensor)
  101. return a, b, c, d
  102. def onp_transpose(input_array):
  103. a = onp.transpose(input_array, (0, 2, 1))
  104. b = onp.transpose(input_array, [2, 1, 0])
  105. c = onp.transpose(input_array, (1, 0, 2))
  106. d = onp.transpose(input_array)
  107. return a, b, c, d
  108. @pytest.mark.level1
  109. @pytest.mark.platform_arm_ascend_training
  110. @pytest.mark.platform_x86_ascend_training
  111. @pytest.mark.platform_x86_gpu_training
  112. @pytest.mark.platform_x86_cpu
  113. @pytest.mark.env_onecard
  114. def test_transpose():
  115. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  116. mnp_array = to_tensor(onp_array)
  117. o_transposed = onp_transpose(onp_array)
  118. m_transposed = mnp_transpose(mnp_array)
  119. check_all_results(o_transposed, m_transposed)
  120. # Test np.expand_dims
  121. def mnp_expand_dims(input_tensor):
  122. a = mnp.expand_dims(input_tensor, 0)
  123. b = mnp.expand_dims(input_tensor, -1)
  124. c = mnp.expand_dims(input_tensor, axis=2)
  125. d = mnp.expand_dims(input_tensor, axis=-2)
  126. return a, b, c, d
  127. def onp_expand_dims(input_array):
  128. a = onp.expand_dims(input_array, 0)
  129. b = onp.expand_dims(input_array, -1)
  130. c = onp.expand_dims(input_array, axis=2)
  131. d = onp.expand_dims(input_array, axis=-2)
  132. return a, b, c, d
  133. @pytest.mark.level1
  134. @pytest.mark.platform_arm_ascend_training
  135. @pytest.mark.platform_x86_ascend_training
  136. @pytest.mark.platform_x86_gpu_training
  137. @pytest.mark.platform_x86_cpu
  138. @pytest.mark.env_onecard
  139. def test_expand_dims():
  140. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  141. mnp_array = to_tensor(onp_array)
  142. o_expanded = onp_expand_dims(onp_array)
  143. m_expanded = mnp_expand_dims(mnp_array)
  144. check_all_results(o_expanded, m_expanded)
  145. # Test np.squeeze
  146. def mnp_squeeze(input_tensor):
  147. a = mnp.squeeze(input_tensor)
  148. b = mnp.squeeze(input_tensor, 0)
  149. c = mnp.squeeze(input_tensor, axis=None)
  150. d = mnp.squeeze(input_tensor, axis=-3)
  151. e = mnp.squeeze(input_tensor, (2,))
  152. f = mnp.squeeze(input_tensor, (0, 2))
  153. return a, b, c, d, e, f
  154. def onp_squeeze(input_array):
  155. a = onp.squeeze(input_array)
  156. b = onp.squeeze(input_array, 0)
  157. c = onp.squeeze(input_array, axis=None)
  158. d = onp.squeeze(input_array, axis=-3)
  159. e = onp.squeeze(input_array, (2,))
  160. f = onp.squeeze(input_array, (0, 2))
  161. return a, b, c, d, e, f
  162. @pytest.mark.level1
  163. @pytest.mark.platform_arm_ascend_training
  164. @pytest.mark.platform_x86_ascend_training
  165. @pytest.mark.platform_x86_gpu_training
  166. @pytest.mark.platform_x86_cpu
  167. @pytest.mark.env_onecard
  168. def test_squeeze():
  169. onp_array = onp.random.random((1, 3, 1, 4, 2)).astype('float32')
  170. mnp_array = to_tensor(onp_array)
  171. o_squeezed = onp_squeeze(onp_array)
  172. m_squeezed = mnp_squeeze(mnp_array)
  173. check_all_results(o_squeezed, m_squeezed)
  174. onp_array = onp.random.random((1, 1, 1, 1, 1)).astype('float32')
  175. mnp_array = to_tensor(onp_array)
  176. o_squeezed = onp_squeeze(onp_array)
  177. m_squeezed = mnp_squeeze(mnp_array)
  178. check_all_results(o_squeezed, m_squeezed)
  179. # Test np.rollaxis
  180. def mnp_rollaxis(input_tensor):
  181. a = mnp.rollaxis(input_tensor, 0, 1)
  182. b = mnp.rollaxis(input_tensor, 0, 2)
  183. c = mnp.rollaxis(input_tensor, 2, 1)
  184. d = mnp.rollaxis(input_tensor, 2, 2)
  185. e = mnp.rollaxis(input_tensor, 0)
  186. f = mnp.rollaxis(input_tensor, 1)
  187. return a, b, c, d, e, f
  188. def onp_rollaxis(input_array):
  189. a = onp.rollaxis(input_array, 0, 1)
  190. b = onp.rollaxis(input_array, 0, 2)
  191. c = onp.rollaxis(input_array, 2, 1)
  192. d = onp.rollaxis(input_array, 2, 2)
  193. e = onp.rollaxis(input_array, 0)
  194. f = onp.rollaxis(input_array, 1)
  195. return a, b, c, d, e, f
  196. @pytest.mark.level1
  197. @pytest.mark.platform_arm_ascend_training
  198. @pytest.mark.platform_x86_ascend_training
  199. @pytest.mark.platform_x86_gpu_training
  200. @pytest.mark.platform_x86_cpu
  201. @pytest.mark.env_onecard
  202. def test_rollaxis():
  203. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  204. mnp_array = to_tensor(onp_array)
  205. o_rolled = onp_rollaxis(onp_array)
  206. m_rolled = mnp_rollaxis(mnp_array)
  207. check_all_results(o_rolled, m_rolled)
  208. # Test np.swapaxes
  209. def mnp_swapaxes(input_tensor):
  210. a = mnp.swapaxes(input_tensor, 0, 1)
  211. b = mnp.swapaxes(input_tensor, 1, 0)
  212. c = mnp.swapaxes(input_tensor, 1, 1)
  213. d = mnp.swapaxes(input_tensor, 2, 1)
  214. e = mnp.swapaxes(input_tensor, 1, 2)
  215. f = mnp.swapaxes(input_tensor, 2, 2)
  216. return a, b, c, d, e, f
  217. def onp_swapaxes(input_array):
  218. a = onp.swapaxes(input_array, 0, 1)
  219. b = onp.swapaxes(input_array, 1, 0)
  220. c = onp.swapaxes(input_array, 1, 1)
  221. d = onp.swapaxes(input_array, 2, 1)
  222. e = onp.swapaxes(input_array, 1, 2)
  223. f = onp.swapaxes(input_array, 2, 2)
  224. return a, b, c, d, e, f
  225. @pytest.mark.level1
  226. @pytest.mark.platform_arm_ascend_training
  227. @pytest.mark.platform_x86_ascend_training
  228. @pytest.mark.platform_x86_gpu_training
  229. @pytest.mark.platform_x86_cpu
  230. @pytest.mark.env_onecard
  231. def test_swapaxes():
  232. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  233. mnp_array = to_tensor(onp_array)
  234. o_swaped = onp_swapaxes(onp_array)
  235. m_swaped = mnp_swapaxes(mnp_array)
  236. check_all_results(o_swaped, m_swaped)
  237. # Test np.reshape
  238. def mnp_reshape(input_tensor):
  239. a = mnp.reshape(input_tensor, (3, 8))
  240. b = mnp.reshape(input_tensor, [3, -1])
  241. c = mnp.reshape(input_tensor, (-1, 12))
  242. d = mnp.reshape(input_tensor, (-1,))
  243. e = mnp.reshape(input_tensor, 24)
  244. f = mnp.reshape(input_tensor, [2, 4, -1])
  245. g = input_tensor.reshape(3, 8)
  246. h = input_tensor.reshape(3, -1)
  247. i = input_tensor.reshape([-1, 3])
  248. j = input_tensor.reshape(-1)
  249. return a, b, c, d, e, f, g, h, i, j
  250. def onp_reshape(input_array):
  251. a = onp.reshape(input_array, (3, 8))
  252. b = onp.reshape(input_array, [3, -1])
  253. c = onp.reshape(input_array, (-1, 12))
  254. d = onp.reshape(input_array, (-1,))
  255. e = onp.reshape(input_array, 24)
  256. f = onp.reshape(input_array, [2, 4, -1])
  257. g = input_array.reshape(3, 8)
  258. h = input_array.reshape(3, -1)
  259. i = input_array.reshape([-1, 3])
  260. j = input_array.reshape(-1)
  261. return a, b, c, d, e, f, g, h, i, j
  262. @pytest.mark.level1
  263. @pytest.mark.platform_arm_ascend_training
  264. @pytest.mark.platform_x86_ascend_training
  265. @pytest.mark.platform_x86_gpu_training
  266. @pytest.mark.platform_x86_cpu
  267. @pytest.mark.env_onecard
  268. def test_reshape():
  269. onp_array = onp.random.random((2, 3, 4)).astype('float32')
  270. mnp_array = to_tensor(onp_array)
  271. o_reshaped = onp_reshape(onp_array)
  272. m_reshaped = mnp_reshape(mnp_array)
  273. check_all_results(o_reshaped, m_reshaped)
  274. # Test np.ravel
  275. def mnp_ravel(input_tensor):
  276. a = mnp.ravel(input_tensor)
  277. return a
  278. def onp_ravel(input_array):
  279. a = onp.ravel(input_array)
  280. return a
  281. @pytest.mark.level1
  282. @pytest.mark.platform_arm_ascend_training
  283. @pytest.mark.platform_x86_ascend_training
  284. @pytest.mark.platform_x86_gpu_training
  285. @pytest.mark.platform_x86_cpu
  286. @pytest.mark.env_onecard
  287. def test_ravel():
  288. onp_array = onp.random.random((2, 3, 4)).astype('float32')
  289. mnp_array = to_tensor(onp_array)
  290. o_ravel = onp_ravel(onp_array)
  291. m_ravel = mnp_ravel(mnp_array).asnumpy()
  292. match_array(o_ravel, m_ravel)
  293. # Test np.concatenate
  294. def mnp_concatenate(input_tensor):
  295. a = mnp.concatenate(input_tensor, None)
  296. b = mnp.concatenate(input_tensor, 0)
  297. c = mnp.concatenate(input_tensor, 1)
  298. d = mnp.concatenate(input_tensor, 2)
  299. return a, b, c, d
  300. def onp_concatenate(input_array):
  301. a = onp.concatenate(input_array, None)
  302. b = onp.concatenate(input_array, 0)
  303. c = onp.concatenate(input_array, 1)
  304. d = onp.concatenate(input_array, 2)
  305. return a, b, c, d
  306. @pytest.mark.level1
  307. @pytest.mark.platform_arm_ascend_training
  308. @pytest.mark.platform_x86_ascend_training
  309. @pytest.mark.platform_x86_gpu_training
  310. @pytest.mark.platform_x86_cpu
  311. @pytest.mark.env_onecard
  312. def test_concatenate():
  313. onp_array = onp.random.random((5, 4, 3, 2)).astype('float32')
  314. mnp_array = to_tensor(onp_array)
  315. o_concatenate = onp_concatenate(onp_array)
  316. m_concatenate = mnp_concatenate(mnp_array)
  317. check_all_results(o_concatenate, m_concatenate)
  318. def mnp_append(arr1, arr2):
  319. a = mnp.append(arr1, arr2)
  320. b = mnp.append(arr1, arr2, axis=0)
  321. c = mnp.append(arr1, arr2, axis=-1)
  322. return a, b, c
  323. def onp_append(arr1, arr2):
  324. a = onp.append(arr1, arr2)
  325. b = onp.append(arr1, arr2, axis=0)
  326. c = onp.append(arr1, arr2, axis=-1)
  327. return a, b, c
  328. @pytest.mark.level1
  329. @pytest.mark.platform_arm_ascend_training
  330. @pytest.mark.platform_x86_ascend_training
  331. @pytest.mark.platform_x86_gpu_training
  332. @pytest.mark.platform_x86_cpu
  333. @pytest.mark.env_onecard
  334. def test_append():
  335. onp_array = onp.random.random((4, 3, 2)).astype('float32')
  336. onp_value = onp.random.random((4, 3, 2)).astype('float32')
  337. mnp_array = to_tensor(onp_array)
  338. mnp_value = to_tensor(onp_value)
  339. onp_res = onp_append(onp_array, onp_value)
  340. mnp_res = mnp_append(mnp_array, mnp_value)
  341. check_all_results(onp_res, mnp_res)
  342. def construct_arrays(n=1, ndim=1, axis=None, low=1, high=5):
  343. onp_array_lst = []
  344. mnp_array_lst = []
  345. shape = onp.random.randint(low=low, high=high, size=ndim)
  346. new_shape = [sh for sh in shape]
  347. while n > 0:
  348. n -= 1
  349. onp_array1 = onp.random.randint(
  350. low=low, high=high, size=shape).astype(onp.float32)
  351. onp_array_lst.append(onp_array1)
  352. mnp_array_lst.append(to_tensor(onp_array1))
  353. if axis is not None and axis < ndim:
  354. new_shape[axis] += onp.random.randint(2)
  355. onp_array2 = onp.random.randint(
  356. low=low, high=high, size=new_shape).astype(onp.float32)
  357. onp_array_lst.append(onp_array2)
  358. mnp_array_lst.append(to_tensor(onp_array2))
  359. return onp_array_lst, mnp_array_lst
  360. # Test np.xstack
  361. def prepare_array_sequences(n_lst, ndim_lst, axis=None, low=1, high=5):
  362. onp_seq_lst = []
  363. mnp_seq_lst = []
  364. for n in n_lst:
  365. for ndim in ndim_lst:
  366. onp_array_lst, mnp_array_lst = construct_arrays(
  367. n=n, ndim=ndim, axis=axis, low=low, high=high)
  368. onp_seq_lst.append(onp_array_lst)
  369. mnp_seq_lst.append(mnp_array_lst)
  370. return onp_seq_lst, mnp_seq_lst
  371. def mnp_column_stack(input_tensor):
  372. return mnp.column_stack(input_tensor)
  373. def onp_column_stack(input_array):
  374. return onp.column_stack(input_array)
  375. @pytest.mark.level1
  376. @pytest.mark.platform_arm_ascend_training
  377. @pytest.mark.platform_x86_ascend_training
  378. @pytest.mark.platform_x86_gpu_training
  379. @pytest.mark.platform_x86_cpu
  380. @pytest.mark.env_onecard
  381. def test_column_stack():
  382. onp_seq_lst, mnp_seq_lst = prepare_array_sequences(
  383. n_lst=[1, 5], ndim_lst=[1, 2, 3, 4], axis=1)
  384. for i, onp_seq in enumerate(onp_seq_lst):
  385. onp_seq = onp_seq_lst[i]
  386. mnp_seq = mnp_seq_lst[i]
  387. o_column_stack = onp_column_stack(onp_seq)
  388. m_column_stack = mnp_column_stack(mnp_seq)
  389. check_all_results(o_column_stack, m_column_stack)
  390. def mnp_hstack(input_tensor):
  391. return mnp.hstack(input_tensor)
  392. def onp_hstack(input_array):
  393. return onp.hstack(input_array)
  394. @pytest.mark.level1
  395. @pytest.mark.platform_arm_ascend_training
  396. @pytest.mark.platform_x86_ascend_training
  397. @pytest.mark.platform_x86_gpu_training
  398. @pytest.mark.platform_x86_cpu
  399. @pytest.mark.env_onecard
  400. def test_hstack():
  401. onp_seq_lst0, mnp_seq_lst0 = prepare_array_sequences(
  402. n_lst=[1, 5], ndim_lst=[2, 3, 4], axis=1)
  403. onp_seq_lst1, mnp_seq_lst1 = prepare_array_sequences(
  404. n_lst=[1, 5], ndim_lst=[1], axis=0)
  405. onp_seq_lst = onp_seq_lst0 + onp_seq_lst1
  406. mnp_seq_lst = mnp_seq_lst0 + mnp_seq_lst1
  407. for i, onp_seq in enumerate(onp_seq_lst):
  408. mnp_seq = mnp_seq_lst[i]
  409. o_hstack = onp_hstack(onp_seq)
  410. m_hstack = mnp_hstack(mnp_seq)
  411. check_all_results(o_hstack, m_hstack)
  412. def mnp_dstack(input_tensor):
  413. return mnp.dstack(input_tensor)
  414. def onp_dstack(input_array):
  415. return onp.dstack(input_array)
  416. @pytest.mark.level1
  417. @pytest.mark.platform_arm_ascend_training
  418. @pytest.mark.platform_x86_ascend_training
  419. @pytest.mark.platform_x86_gpu_training
  420. @pytest.mark.platform_x86_cpu
  421. @pytest.mark.env_onecard
  422. def test_dstack():
  423. onp_seq_lst, mnp_seq_lst = prepare_array_sequences(
  424. n_lst=[1, 5], ndim_lst=[1, 2, 3, 4], axis=2)
  425. for i, onp_seq in enumerate(onp_seq_lst):
  426. mnp_seq = mnp_seq_lst[i]
  427. o_dstack = onp_dstack(onp_seq)
  428. m_dstack = mnp_dstack(mnp_seq)
  429. check_all_results(o_dstack, m_dstack)
  430. def mnp_vstack(input_tensor):
  431. return mnp.vstack(input_tensor)
  432. def onp_vstack(input_array):
  433. return onp.vstack(input_array)
  434. @pytest.mark.level1
  435. @pytest.mark.platform_arm_ascend_training
  436. @pytest.mark.platform_x86_ascend_training
  437. @pytest.mark.platform_x86_gpu_training
  438. @pytest.mark.platform_x86_cpu
  439. @pytest.mark.env_onecard
  440. def test_vstack():
  441. onp_seq_lst, mnp_seq_lst = prepare_array_sequences(
  442. n_lst=[1], ndim_lst=[2], axis=0)
  443. for i, onp_seq in enumerate(onp_seq_lst):
  444. mnp_seq = mnp_seq_lst[i]
  445. o_vstack = onp_vstack(onp_seq)
  446. m_vstack = mnp_vstack(mnp_seq)
  447. check_all_results(o_vstack, m_vstack)
  448. # Test np.atleastxd
  449. def mnp_atleast1d(*arys):
  450. return mnp.atleast_1d(*arys)
  451. def onp_atleast1d(*arys):
  452. return onp.atleast_1d(*arys)
  453. def mnp_atleast2d(*arys):
  454. return mnp.atleast_2d(*arys)
  455. def onp_atleast2d(*arys):
  456. return onp.atleast_2d(*arys)
  457. def mnp_atleast3d(*arys):
  458. return mnp.atleast_3d(*arys)
  459. def onp_atleast3d(*arys):
  460. return onp.atleast_3d(*arys)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_atleast1d():
    """mnp.atleast_1d agrees with onp.atleast_1d over the shared fixtures."""
    run_non_kw_test(mnp_atleast1d, onp_atleast1d, Cases())


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_atleast2d():
    """mnp.atleast_2d agrees with onp.atleast_2d over the shared fixtures."""
    run_non_kw_test(mnp_atleast2d, onp_atleast2d, Cases())


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_atleast3d():
    """mnp.atleast_3d agrees with onp.atleast_3d over the shared fixtures."""
    run_non_kw_test(mnp_atleast3d, onp_atleast3d, Cases())
  485. # Test np.where
  486. def mnp_where(condition, x, y):
  487. return mnp.where(condition, x, y)
  488. def onp_where(condition, x, y):
  489. return onp.where(condition, x, y)
  490. @pytest.mark.level0
  491. @pytest.mark.platform_arm_ascend_training
  492. @pytest.mark.platform_x86_ascend_training
  493. @pytest.mark.platform_x86_gpu_training
  494. @pytest.mark.platform_x86_cpu
  495. @pytest.mark.env_onecard
  496. def test_where():
  497. test_case = Cases()
  498. for condition1 in test_case.bool_broadcastables[:2]:
  499. for x in test_case.broadcastables[:2]:
  500. for y in test_case.broadcastables[:2]:
  501. for condition2 in test_case.broadcastables[:2]:
  502. match_res(mnp_where, onp_where, condition1, x, y)
  503. match_res(mnp_where, onp_where, condition2, x, y)
  504. # Test ndarray.flatten
  505. def mnp_ndarray_flatten(input_tensor):
  506. a = input_tensor.flatten()
  507. b = input_tensor.flatten(order='F')
  508. c = input_tensor.flatten(order='C')
  509. return a, b, c
  510. def onp_ndarray_flatten(input_array):
  511. a = input_array.flatten()
  512. b = input_array.flatten(order='F')
  513. c = input_array.flatten(order='C')
  514. return a, b, c
  515. @pytest.mark.level1
  516. @pytest.mark.platform_arm_ascend_training
  517. @pytest.mark.platform_x86_ascend_training
  518. @pytest.mark.platform_x86_gpu_training
  519. @pytest.mark.platform_x86_cpu
  520. @pytest.mark.env_onecard
  521. def test_ndarray_flatten():
  522. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  523. mnp_array = to_tensor(onp_array)
  524. o_flatten = onp_ndarray_flatten(onp_array)
  525. m_flatten = mnp_ndarray_flatten(mnp_array)
  526. check_all_results(o_flatten, m_flatten)
  527. # Test ndarray.transpose
  528. def mnp_ndarray_transpose(input_tensor):
  529. a = input_tensor.T
  530. b = input_tensor.transpose()
  531. c = input_tensor.transpose((0, 2, 1))
  532. d = input_tensor.transpose([0, 2, 1])
  533. return a, b, c, d
  534. def onp_ndarray_transpose(input_array):
  535. a = input_array.T
  536. b = input_array.transpose()
  537. c = input_array.transpose((0, 2, 1))
  538. d = input_array.transpose([0, 2, 1])
  539. return a, b, c, d
  540. @pytest.mark.level1
  541. @pytest.mark.platform_arm_ascend_training
  542. @pytest.mark.platform_x86_ascend_training
  543. @pytest.mark.platform_x86_gpu_training
  544. @pytest.mark.platform_x86_cpu
  545. @pytest.mark.env_onecard
  546. def test_ndarray_transpose():
  547. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  548. mnp_array = to_tensor(onp_array)
  549. o_transposed = onp_ndarray_transpose(onp_array)
  550. m_transposed = mnp_ndarray_transpose(mnp_array)
  551. check_all_results(o_transposed, m_transposed)
  552. # Test ndarray.astype
  553. def mnp_ndarray_astype(input_tensor):
  554. a = input_tensor.astype("float16")
  555. b = input_tensor.astype(onp.float64)
  556. c = input_tensor.astype(mnp.bool_)
  557. return a, b, c
  558. def onp_ndarray_astype(input_array):
  559. a = input_array.astype("float16")
  560. b = input_array.astype(onp.float64)
  561. c = input_array.astype(onp.bool_)
  562. return a, b, c
  563. @pytest.mark.level1
  564. @pytest.mark.platform_arm_ascend_training
  565. @pytest.mark.platform_x86_ascend_training
  566. @pytest.mark.platform_x86_gpu_training
  567. @pytest.mark.platform_x86_cpu
  568. @pytest.mark.env_onecard
  569. def test_ndarray_astype():
  570. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  571. mnp_array = to_tensor(onp_array)
  572. o_astype = onp_ndarray_astype(onp_array)
  573. m_astype = mnp_ndarray_astype(mnp_array)
  574. for arr1, arr2 in zip(o_astype, m_astype):
  575. assert arr1.dtype == arr2.asnumpy().dtype
  576. def onp_concatenate_type_promotion(onp_array1, onp_array2, onp_array3, onp_array4):
  577. o_concatenate = onp.concatenate((onp_array1,
  578. onp_array2,
  579. onp_array3,
  580. onp_array4), -1)
  581. return o_concatenate
  582. def mnp_concatenate_type_promotion(mnp_array1, mnp_array2, mnp_array3, mnp_array4):
  583. m_concatenate = mnp.concatenate([mnp_array1,
  584. mnp_array2,
  585. mnp_array3,
  586. mnp_array4], -1)
  587. return m_concatenate
  588. @pytest.mark.level1
  589. @pytest.mark.platform_arm_ascend_training
  590. @pytest.mark.platform_x86_ascend_training
  591. @pytest.mark.platform_x86_gpu_training
  592. @pytest.mark.platform_x86_cpu
  593. @pytest.mark.env_onecard
  594. def test_concatenate_type_promotion():
  595. onp_array = onp.random.random((5, 1)).astype('float32')
  596. mnp_array = to_tensor(onp_array)
  597. onp_array1 = onp_array.astype(onp.float16)
  598. onp_array2 = onp_array.astype(onp.bool_)
  599. onp_array3 = onp_array.astype(onp.float32)
  600. onp_array4 = onp_array.astype(onp.int32)
  601. mnp_array1 = mnp_array.astype(onp.float16)
  602. mnp_array2 = mnp_array.astype(onp.bool_)
  603. mnp_array3 = mnp_array.astype(onp.float32)
  604. mnp_array4 = mnp_array.astype(onp.int32)
  605. o_concatenate = onp_concatenate_type_promotion(
  606. onp_array1, onp_array2, onp_array3, onp_array4).astype('float32')
  607. m_concatenate = mnp_concatenate_type_promotion(
  608. mnp_array1, mnp_array2, mnp_array3, mnp_array4)
  609. check_all_results(o_concatenate, m_concatenate, error=1e-7)
  610. def mnp_stack(*arrs):
  611. a = mnp.stack(arrs, axis=-4)
  612. b = mnp.stack(arrs, axis=-3)
  613. c = mnp.stack(arrs, axis=0)
  614. d = mnp.stack(arrs, axis=3)
  615. e = mnp.stack(arrs, axis=2)
  616. return a, b, c, d, e
  617. def onp_stack(*arrs):
  618. a = onp.stack(arrs, axis=-4)
  619. b = onp.stack(arrs, axis=-3)
  620. c = onp.stack(arrs, axis=0)
  621. d = onp.stack(arrs, axis=3)
  622. e = onp.stack(arrs, axis=2)
  623. return a, b, c, d, e
  624. @pytest.mark.level1
  625. @pytest.mark.platform_arm_ascend_training
  626. @pytest.mark.platform_x86_ascend_training
  627. @pytest.mark.platform_x86_gpu_training
  628. @pytest.mark.platform_x86_cpu
  629. @pytest.mark.env_onecard
  630. def test_stack():
  631. arr = rand_int(3, 4, 5, 6)
  632. match_res(mnp.stack, onp.stack, arr)
  633. for i in range(-4, 4):
  634. match_res(mnp.stack, onp.stack, arr, axis=i)
  635. arrs = [rand_int(3, 4, 5) for i in range(10)]
  636. match_res(mnp.stack, onp.stack, arrs)
  637. match_res(mnp.stack, onp.stack, tuple(arrs))
  638. match_res(mnp_stack, onp_stack, *arrs)
  639. for i in range(-4, 4):
  640. match_res(mnp.stack, onp.stack, arrs, axis=i)
  641. def mnp_roll(input_tensor):
  642. a = mnp.roll(input_tensor, -3)
  643. b = mnp.roll(input_tensor, [-2, -3], 1)
  644. c = mnp.roll(input_tensor, (3, 0, -5), (-1, -2, 0))
  645. d = mnp.roll(input_tensor, (4,), [0, 0, 1])
  646. return a, b, c, d
  647. def onp_roll(input_array):
  648. a = onp.roll(input_array, -3)
  649. b = onp.roll(input_array, [-2, -3], 1)
  650. c = onp.roll(input_array, (3, 0, -5), (-1, -2, 0))
  651. d = onp.roll(input_array, (4,), [0, 0, 1])
  652. return a, b, c, d
  653. @pytest.mark.level1
  654. @pytest.mark.platform_arm_ascend_training
  655. @pytest.mark.platform_x86_ascend_training
  656. @pytest.mark.platform_x86_gpu_training
  657. @pytest.mark.platform_x86_cpu
  658. @pytest.mark.env_onecard
  659. def test_roll():
  660. arr = rand_int(3, 4, 5)
  661. match_res(mnp_roll, onp_roll, arr)
  662. arr = rand_int(1, 4, 6).astype("int64")
  663. match_res(mnp_roll, onp_roll, arr)
  664. def mnp_moveaxis(a):
  665. a = mnp.moveaxis(a, 3, 3)
  666. b = mnp.moveaxis(a, -1, 4)
  667. c = mnp.moveaxis(a, (2, 1, 4), (0, 3, 2))
  668. d = mnp.moveaxis(a, [-2, -5], [2, -4])
  669. return a, b, c, d
  670. def onp_moveaxis(a):
  671. a = onp.moveaxis(a, 3, 3)
  672. b = onp.moveaxis(a, -1, 4)
  673. c = onp.moveaxis(a, (2, 1, 4), (0, 3, 2))
  674. d = onp.moveaxis(a, [-2, -5], [2, -4])
  675. return a, b, c, d
  676. @pytest.mark.level1
  677. @pytest.mark.platform_arm_ascend_training
  678. @pytest.mark.platform_x86_ascend_training
  679. @pytest.mark.platform_x86_gpu_training
  680. @pytest.mark.platform_x86_cpu
  681. @pytest.mark.env_onecard
  682. def test_moveaxis():
  683. a = rand_int(2, 4, 5, 9, 6)
  684. match_res(mnp_moveaxis, onp_moveaxis, a)
  685. def mnp_tile(x):
  686. a = mnp.tile(x, 1)
  687. b = mnp.tile(x, 3)
  688. c = mnp.tile(x, [5, 1])
  689. d = mnp.tile(x, [5, 1, 2, 3, 7])
  690. return a, b, c, d
  691. def onp_tile(x):
  692. a = onp.tile(x, 1)
  693. b = onp.tile(x, 3)
  694. c = onp.tile(x, [5, 1])
  695. d = onp.tile(x, [5, 1, 2, 3, 7])
  696. return a, b, c, d
  697. @pytest.mark.level1
  698. @pytest.mark.platform_arm_ascend_training
  699. @pytest.mark.platform_x86_ascend_training
  700. @pytest.mark.platform_x86_gpu_training
  701. @pytest.mark.platform_x86_cpu
  702. @pytest.mark.env_onecard
  703. def test_tile():
  704. a = rand_int(2, 3, 4)
  705. match_res(mnp_tile, onp_tile, a)
  706. def mnp_broadcast_to(x):
  707. a = mnp.broadcast_to(x, (2, 3))
  708. b = mnp.broadcast_to(x, (8, 1, 3))
  709. return a, b
  710. def onp_broadcast_to(x):
  711. a = onp.broadcast_to(x, (2, 3))
  712. b = onp.broadcast_to(x, (8, 1, 3))
  713. return a, b
  714. @pytest.mark.level0
  715. @pytest.mark.platform_arm_ascend_training
  716. @pytest.mark.platform_x86_ascend_training
  717. @pytest.mark.platform_x86_gpu_training
  718. @pytest.mark.platform_x86_cpu
  719. @pytest.mark.env_onecard
  720. def test_broadcast_to():
  721. x = rand_int()
  722. match_res(mnp_broadcast_to, onp_broadcast_to, x)
  723. x = rand_int(3)
  724. match_res(mnp_broadcast_to, onp_broadcast_to, x)
  725. x = rand_int(1, 3)
  726. match_res(mnp_broadcast_to, onp_broadcast_to, x)
  727. def mnp_broadcast_arrays(*args):
  728. return mnp.broadcast_arrays(*args)
  729. def onp_broadcast_arrays(*args):
  730. return onp.broadcast_arrays(*args)
  731. @pytest.mark.level1
  732. @pytest.mark.platform_arm_ascend_training
  733. @pytest.mark.platform_x86_ascend_training
  734. @pytest.mark.platform_x86_gpu_training
  735. @pytest.mark.platform_x86_cpu
  736. @pytest.mark.env_onecard
  737. def test_broadcast_arrays():
  738. test_case = Cases()
  739. broadcastables = test_case.broadcastables
  740. for i in range(len(broadcastables)):
  741. arrs = broadcastables[i:]
  742. match_res(mnp_broadcast_arrays, onp_broadcast_arrays, *arrs)
  743. def mnp_flip(x):
  744. a = mnp.flip(x)
  745. b = mnp.flip(x, 0)
  746. c = mnp.flip(x, 1)
  747. d = mnp.flip(x, (-3, -1))
  748. return a, b, c, d
  749. def onp_flip(x):
  750. a = onp.flip(x)
  751. b = onp.flip(x, 0)
  752. c = onp.flip(x, 1)
  753. d = onp.flip(x, (-3, -1))
  754. return a, b, c, d
  755. @pytest.mark.level2
  756. @pytest.mark.platform_arm_ascend_training
  757. @pytest.mark.platform_x86_ascend_training
  758. @pytest.mark.platform_x86_gpu_training
  759. @pytest.mark.platform_x86_cpu
  760. @pytest.mark.env_onecard
  761. def test_flip():
  762. x = rand_int(2, 3, 4)
  763. run_multi_test(mnp_flip, onp_flip, (x,))
  764. def mnp_flipud(x):
  765. return mnp.flipud(x)
  766. def onp_flipud(x):
  767. return onp.flipud(x)
  768. @pytest.mark.level2
  769. @pytest.mark.platform_arm_ascend_training
  770. @pytest.mark.platform_x86_ascend_training
  771. @pytest.mark.platform_x86_gpu_training
  772. @pytest.mark.platform_x86_cpu
  773. @pytest.mark.env_onecard
  774. def test_flipud():
  775. x = rand_int(2, 3, 4)
  776. run_multi_test(mnp_flipud, onp_flipud, (x,))
  777. def mnp_fliplr(x):
  778. return mnp.fliplr(x)
  779. def onp_fliplr(x):
  780. return onp.fliplr(x)
  781. @pytest.mark.level2
  782. @pytest.mark.platform_arm_ascend_training
  783. @pytest.mark.platform_x86_ascend_training
  784. @pytest.mark.platform_x86_gpu_training
  785. @pytest.mark.platform_x86_cpu
  786. @pytest.mark.env_onecard
  787. def test_fliplr():
  788. x = rand_int(2, 3, 4)
  789. run_multi_test(mnp_fliplr, onp_fliplr, (x,))
  790. def mnp_split(input_tensor):
  791. a = mnp.split(input_tensor, indices_or_sections=1)
  792. b = mnp.split(input_tensor, indices_or_sections=3)
  793. return a, b
  794. def onp_split(input_array):
  795. a = onp.split(input_array, indices_or_sections=1)
  796. b = onp.split(input_array, indices_or_sections=3)
  797. return a, b
  798. @pytest.mark.level1
  799. @pytest.mark.platform_arm_ascend_training
  800. @pytest.mark.platform_x86_ascend_training
  801. @pytest.mark.platform_x86_gpu_training
  802. @pytest.mark.platform_x86_cpu
  803. @pytest.mark.env_onecard
  804. def test_split():
  805. onp_arrs = [
  806. onp.random.randint(1, 5, size=(9, 4, 5)).astype('float32')
  807. ]
  808. mnp_arrs = [to_tensor(arr) for arr in onp_arrs]
  809. for onp_arr, mnp_arr in zip(onp_arrs, mnp_arrs):
  810. o_split = onp_split(onp_arr)
  811. m_split = mnp_split(mnp_arr)
  812. for expect_lst, actual_lst in zip(o_split, m_split):
  813. for expect, actual in zip(expect_lst, actual_lst):
  814. match_array(expect, actual.asnumpy())
  815. def mnp_array_split(input_tensor):
  816. a = mnp.array_split(input_tensor, indices_or_sections=4, axis=2)
  817. b = mnp.array_split(input_tensor, indices_or_sections=3, axis=1)
  818. c = mnp.array_split(input_tensor, indices_or_sections=6)
  819. return a, b, c
  820. def onp_array_split(input_array):
  821. a = onp.array_split(input_array, indices_or_sections=4, axis=2)
  822. b = onp.array_split(input_array, indices_or_sections=3, axis=1)
  823. c = onp.array_split(input_array, indices_or_sections=6)
  824. return a, b, c
  825. @pytest.mark.level1
  826. @pytest.mark.platform_arm_ascend_training
  827. @pytest.mark.platform_x86_ascend_training
  828. @pytest.mark.platform_x86_gpu_training
  829. @pytest.mark.platform_x86_cpu
  830. @pytest.mark.env_onecard
  831. def test_array_split():
  832. onp_arr = onp.random.randint(1, 5, size=(9, 7, 13)).astype('float32')
  833. mnp_arr = to_tensor(onp_arr)
  834. o_split = onp_split(onp_arr)
  835. m_split = mnp_split(mnp_arr)
  836. for expect_lst, actual_lst in zip(o_split, m_split):
  837. for expect, actual in zip(expect_lst, actual_lst):
  838. match_array(expect, actual.asnumpy())
  839. def mnp_vsplit(input_tensor):
  840. a = mnp.vsplit(input_tensor, indices_or_sections=3)
  841. return a
  842. def onp_vsplit(input_array):
  843. a = onp.vsplit(input_array, indices_or_sections=3)
  844. return a
  845. @pytest.mark.level1
  846. @pytest.mark.platform_arm_ascend_training
  847. @pytest.mark.platform_x86_ascend_training
  848. @pytest.mark.platform_x86_gpu_training
  849. @pytest.mark.platform_x86_cpu
  850. @pytest.mark.env_onecard
  851. def test_vsplit():
  852. onp_arrs = [
  853. onp.random.randint(1, 5, size=(9, 4, 5)).astype('float32')
  854. ]
  855. mnp_arrs = [to_tensor(arr) for arr in onp_arrs]
  856. for onp_arr, mnp_arr in zip(onp_arrs, mnp_arrs):
  857. o_vsplit = onp_vsplit(onp_arr)
  858. m_vsplit = mnp_vsplit(mnp_arr)
  859. for expect_lst, actual_lst in zip(o_vsplit, m_vsplit):
  860. for expect, actual in zip(expect_lst, actual_lst):
  861. match_array(expect, actual.asnumpy())
  862. def mnp_hsplit(input_tensor):
  863. a = mnp.hsplit(input_tensor, indices_or_sections=3)
  864. return a
  865. def onp_hsplit(input_array):
  866. a = onp.hsplit(input_array, indices_or_sections=3)
  867. return a
  868. @pytest.mark.level1
  869. @pytest.mark.platform_arm_ascend_training
  870. @pytest.mark.platform_x86_ascend_training
  871. @pytest.mark.platform_x86_gpu_training
  872. @pytest.mark.platform_x86_cpu
  873. @pytest.mark.env_onecard
  874. def test_hsplit():
  875. onp_arrs = [
  876. onp.random.randint(1, 5, size=(4, 9, 5)).astype('float32')
  877. ]
  878. mnp_arrs = [to_tensor(arr) for arr in onp_arrs]
  879. for onp_arr, mnp_arr in zip(onp_arrs, mnp_arrs):
  880. o_hsplit = onp_hsplit(onp_arr)
  881. m_hsplit = mnp_hsplit(mnp_arr)
  882. for expect_lst, actual_lst in zip(o_hsplit, m_hsplit):
  883. for expect, actual in zip(expect_lst, actual_lst):
  884. match_array(expect, actual.asnumpy())
  885. def mnp_dsplit(input_tensor):
  886. a = mnp.dsplit(input_tensor, indices_or_sections=3)
  887. return a
  888. def onp_dsplit(input_array):
  889. a = onp.dsplit(input_array, indices_or_sections=3)
  890. return a
  891. @pytest.mark.level1
  892. @pytest.mark.platform_arm_ascend_training
  893. @pytest.mark.platform_x86_ascend_training
  894. @pytest.mark.platform_x86_gpu_training
  895. @pytest.mark.platform_x86_cpu
  896. @pytest.mark.env_onecard
  897. def test_dsplit():
  898. onp_arrs = [
  899. onp.random.randint(1, 5, size=(5, 4, 9)).astype('float32')
  900. ]
  901. mnp_arrs = [to_tensor(arr) for arr in onp_arrs]
  902. for onp_arr, mnp_arr in zip(onp_arrs, mnp_arrs):
  903. o_dsplit = onp_dsplit(onp_arr)
  904. m_dsplit = mnp_dsplit(mnp_arr)
  905. for expect_lst, actual_lst in zip(o_dsplit, m_dsplit):
  906. for expect, actual in zip(expect_lst, actual_lst):
  907. match_array(expect, actual.asnumpy())
  908. def mnp_take_along_axis(*arrs):
  909. x = arrs[0]
  910. a = mnp.take_along_axis(x, arrs[1], axis=None)
  911. b = mnp.take_along_axis(x, arrs[2], axis=1)
  912. c = mnp.take_along_axis(x, arrs[3], axis=-1)
  913. d = mnp.take_along_axis(x, arrs[4], axis=0)
  914. return a, b, c, d
  915. def onp_take_along_axis(*arrs):
  916. x = arrs[0]
  917. a = onp.take_along_axis(x, arrs[1], axis=None)
  918. b = onp.take_along_axis(x, arrs[2], axis=1)
  919. c = onp.take_along_axis(x, arrs[3], axis=-1)
  920. d = onp.take_along_axis(x, arrs[4], axis=0)
  921. return a, b, c, d
  922. @pytest.mark.level1
  923. @pytest.mark.platform_arm_ascend_training
  924. @pytest.mark.platform_x86_ascend_training
  925. @pytest.mark.platform_x86_gpu_training
  926. @pytest.mark.platform_x86_cpu
  927. @pytest.mark.env_onecard
  928. def test_take_along_axis():
  929. x = rand_int(6, 7, 8, 9)
  930. indices1 = rand_int(2).astype(onp.int32)
  931. indices2 = rand_int(6, 3, 8, 1).astype(onp.int32)
  932. indices3 = rand_int(6, 1, 8, 5).astype(onp.int32)
  933. indices4 = rand_int(4, 1, 1, 1).astype(onp.int32)
  934. run_multi_test(mnp_take_along_axis, onp_take_along_axis,
  935. (x, indices1, indices2, indices3, indices4))
  936. def mnp_take(x, indices):
  937. a = mnp.take(x, indices)
  938. b = mnp.take(x, indices, axis=-1)
  939. c = mnp.take(x, indices, axis=0, mode='wrap')
  940. d = mnp.take(x, indices, axis=1, mode='clip')
  941. return a, b, c, d
  942. def onp_take(x, indices):
  943. a = onp.take(x, indices)
  944. b = onp.take(x, indices, axis=-1)
  945. c = onp.take(x, indices, axis=0, mode='wrap')
  946. d = onp.take(x, indices, axis=1, mode='clip')
  947. return a, b, c, d
  948. @pytest.mark.level1
  949. @pytest.mark.platform_arm_ascend_training
  950. @pytest.mark.platform_x86_ascend_training
  951. @pytest.mark.platform_x86_gpu_training
  952. @pytest.mark.platform_x86_cpu
  953. @pytest.mark.env_onecard
  954. def test_take():
  955. x = rand_int(2, 3, 4, 5)
  956. indices = rand_int(2, 3).astype(onp.int32)
  957. run_multi_test(mnp_take, onp_take, (x, indices))
  958. def mnp_repeat(x):
  959. a = mnp.repeat(x, 2)
  960. b = mnp.repeat(x, 3, axis=0)
  961. c = mnp.repeat(x, (4, 1, 5), axis=1)
  962. d = mnp.repeat(x, (3, 2, 1, 0, 4), axis=-1)
  963. e = mnp.repeat(x, 0)
  964. return a, b, c, d, e
  965. def onp_repeat(x):
  966. a = onp.repeat(x, 2)
  967. b = onp.repeat(x, 3, axis=0)
  968. c = onp.repeat(x, (4, 1, 5), axis=1)
  969. d = onp.repeat(x, (3, 2, 1, 0, 4), axis=-1)
  970. e = onp.repeat(x, 0)
  971. return a, b, c, d, e
  972. @pytest.mark.level1
  973. @pytest.mark.platform_arm_ascend_training
  974. @pytest.mark.platform_x86_ascend_training
  975. @pytest.mark.platform_x86_gpu_training
  976. @pytest.mark.platform_x86_cpu
  977. @pytest.mark.env_onecard
  978. def test_repeat():
  979. x = rand_int(2, 3, 4, 5)
  980. run_multi_test(mnp_repeat, onp_repeat, (x,))
  981. @pytest.mark.level1
  982. @pytest.mark.platform_arm_ascend_training
  983. @pytest.mark.platform_x86_ascend_training
  984. @pytest.mark.platform_x86_gpu_training
  985. @pytest.mark.platform_x86_cpu
  986. @pytest.mark.env_onecard
  987. def test_select():
  988. choicelist = rand_int(2, 3, 4, 5)
  989. condlist = choicelist > 2
  990. match_res(mnp.select, onp.select, condlist, choicelist)
  991. match_res(mnp.select, onp.select, condlist, choicelist, default=10)
  992. condlist = rand_bool(5, 4, 1, 3)
  993. choicelist = rand_int(5, 3)
  994. match_res(mnp.select, onp.select, condlist, choicelist)
  995. match_res(mnp.select, onp.select, condlist, choicelist, default=10)
  996. condlist = rand_bool(3, 1, 7)
  997. choicelist = rand_int(3, 5, 2, 1)
  998. match_res(mnp.select, onp.select, condlist, choicelist)
  999. match_res(mnp.select, onp.select, condlist, choicelist, default=10)
  1000. @pytest.mark.level0
  1001. @pytest.mark.platform_x86_gpu_training
  1002. @pytest.mark.platform_x86_cpu
  1003. @pytest.mark.env_onecard
  1004. def test_choose():
  1005. x = rand_int(2, 1, 4).astype(onp.int32)
  1006. y = rand_int(3, 2, 5, 4).astype(onp.int32)
  1007. match_res(mnp.choose, onp.choose, x, y, mode='wrap', dtype=mnp.int32)
  1008. match_res(mnp.choose, onp.choose, x, y, mode='clip', dtype=mnp.int32)
  1009. x = rand_int(5, 3, 1, 7).astype(onp.int32)
  1010. y1 = rand_int(7).astype(onp.int32)
  1011. y2 = rand_int(1, 3, 1).astype(onp.int32)
  1012. y3 = rand_int(5, 1, 1, 7).astype(onp.int32)
  1013. onp_arrays = (x, (y1, y2, y3))
  1014. mnp_arrays = (to_tensor(x), tuple(map(to_tensor, (y1, y2, y3))))
  1015. match_all_arrays(mnp.choose(*mnp_arrays, mode='wrap'), onp.choose(*onp_arrays, mode='wrap'))
  1016. match_all_arrays(mnp.choose(*mnp_arrays, mode='clip'), onp.choose(*onp_arrays, mode='clip'))
  1017. class ReshapeExpandSqueeze(Cell):
  1018. def __init__(self):
  1019. super(ReshapeExpandSqueeze, self).__init__()
  1020. def construct(self, x):
  1021. x = mnp.expand_dims(x, 2)
  1022. x = mnp.reshape(x, (1, 2, 3, 4, 1, 1))
  1023. x = mnp.squeeze(x)
  1024. return x
  1025. class TransposeConcatRavel(Cell):
  1026. def __init__(self):
  1027. super(TransposeConcatRavel, self).__init__()
  1028. def construct(self, x1, x2, x3):
  1029. x1 = mnp.transpose(x1, [0, 2, 1])
  1030. x2 = x2.transpose(0, 2, 1)
  1031. x = mnp.concatenate((x1, x2, x3), -1)
  1032. x = mnp.ravel(x)
  1033. return x
  1034. class RollSwap(Cell):
  1035. def __init__(self):
  1036. super(RollSwap, self).__init__()
  1037. def construct(self, x):
  1038. x = mnp.rollaxis(x, 2)
  1039. x = mnp.swapaxes(x, 0, 1)
  1040. return x
# Cell-based cases consumed by the shared op-exec test harness.
test_case_array_ops = [
    ('ReshapeExpandSqueeze', {
        'block': ReshapeExpandSqueeze(),
        'desc_inputs': [mnp.ones((2, 3, 4))]}),
    ('TransposeConcatRavel', {
        'block': TransposeConcatRavel(),
        'desc_inputs': [mnp.ones((2, 3, 4)),
                        mnp.ones((2, 3, 4)),
                        mnp.ones((2, 4, 1))]}),
    ('RollSwap', {
        'block': RollSwap(),
        'desc_inputs': [mnp.ones((2, 3, 4))]})
]
# Flatten every per-category case list into one list for the exec fixture.
test_case_lists = [test_case_array_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
  1056. # use -k to select certain testcast
  1057. # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
  1058. @pytest.mark.level1
  1059. @pytest.mark.platform_arm_ascend_training
  1060. @pytest.mark.platform_x86_ascend_training
  1061. @pytest.mark.platform_x86_gpu_training
  1062. @pytest.mark.platform_x86_cpu
  1063. @pytest.mark.env_onecard
  1064. def test_expand_dims_exception():
  1065. with pytest.raises(TypeError):
  1066. mnp.expand_dims(mnp.ones((3, 3)), 1.2)
  1067. @pytest.mark.level1
  1068. @pytest.mark.platform_arm_ascend_training
  1069. @pytest.mark.platform_x86_ascend_training
  1070. @pytest.mark.platform_x86_gpu_training
  1071. @pytest.mark.platform_x86_cpu
  1072. @pytest.mark.env_onecard
  1073. def test_swapaxes_exception():
  1074. with pytest.raises(ValueError):
  1075. mnp.swapaxes(mnp.ones((3, 3)), 1, 10)
  1076. @pytest.mark.level1
  1077. @pytest.mark.platform_arm_ascend_training
  1078. @pytest.mark.platform_x86_ascend_training
  1079. @pytest.mark.platform_x86_gpu_training
  1080. @pytest.mark.platform_x86_cpu
  1081. @pytest.mark.env_onecard
  1082. def test_tensor_flatten():
  1083. lst = [[1.0, 2.0], [3.0, 4.0]]
  1084. tensor_list = to_tensor(lst)
  1085. assert tensor_list.flatten().asnumpy().tolist() == [1.0, 2.0, 3.0, 4.0]
  1086. assert tensor_list.flatten(order='F').asnumpy().tolist() == [
  1087. 1.0, 3.0, 2.0, 4.0]
  1088. @pytest.mark.level1
  1089. @pytest.mark.platform_arm_ascend_training
  1090. @pytest.mark.platform_x86_ascend_training
  1091. @pytest.mark.platform_x86_gpu_training
  1092. @pytest.mark.platform_x86_cpu
  1093. @pytest.mark.env_onecard
  1094. def test_tensor_reshape():
  1095. lst = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
  1096. tensor_list = to_tensor(lst)
  1097. with pytest.raises(TypeError):
  1098. tensor_list = tensor_list.reshape({0, 1, 2})
  1099. with pytest.raises(ValueError):
  1100. tensor_list = tensor_list.reshape(1, 2, 3)
  1101. assert tensor_list.reshape([-1, 4]).shape == (2, 4)
  1102. assert tensor_list.reshape(1, -1, 4).shape == (1, 2, 4)
  1103. @pytest.mark.level1
  1104. @pytest.mark.platform_arm_ascend_training
  1105. @pytest.mark.platform_x86_ascend_training
  1106. @pytest.mark.platform_x86_gpu_training
  1107. @pytest.mark.platform_x86_cpu
  1108. @pytest.mark.env_onecard
  1109. def test_tensor_squeeze():
  1110. lst = [[[1.0], [2.0], [3.0]]]
  1111. tensor_list = to_tensor(lst)
  1112. with pytest.raises(TypeError):
  1113. tensor_list = tensor_list.squeeze(1.2)
  1114. with pytest.raises(ValueError):
  1115. tensor_list = tensor_list.squeeze(4)
  1116. assert tensor_list.squeeze().shape == (3,)
  1117. assert tensor_list.squeeze(axis=2).shape == (1, 3)
  1118. @pytest.mark.level1
  1119. @pytest.mark.platform_arm_ascend_training
  1120. @pytest.mark.platform_x86_ascend_training
  1121. @pytest.mark.platform_x86_gpu_training
  1122. @pytest.mark.platform_x86_cpu
  1123. @pytest.mark.env_onecard
  1124. def test_tensor_ravel():
  1125. lst = [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]
  1126. tensor_list = to_tensor(lst)
  1127. assert tensor_list.ravel().shape == (8,)
  1128. assert tensor_list.ravel().asnumpy().tolist() == [
  1129. 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
  1130. @pytest.mark.level1
  1131. @pytest.mark.platform_arm_ascend_training
  1132. @pytest.mark.platform_x86_ascend_training
  1133. @pytest.mark.platform_x86_gpu_training
  1134. @pytest.mark.platform_x86_cpu
  1135. @pytest.mark.env_onecard
  1136. def test_tensor_swapaxes():
  1137. lst = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
  1138. tensor_list = to_tensor(lst)
  1139. with pytest.raises(TypeError):
  1140. tensor_list = tensor_list.swapaxes(0, (1,))
  1141. with pytest.raises(ValueError):
  1142. tensor_list = tensor_list.swapaxes(0, 3)
  1143. assert tensor_list.swapaxes(0, 1).shape == (3, 2)
  1144. def mnp_rot90(input_tensor):
  1145. a = mnp.rot90(input_tensor)
  1146. b = mnp.rot90(input_tensor, 2)
  1147. c = mnp.rot90(input_tensor, 3)
  1148. d = mnp.rot90(input_tensor, 4)
  1149. e = mnp.rot90(input_tensor, 5, (0, -1))
  1150. f = mnp.rot90(input_tensor, 1, (2, 0))
  1151. g = mnp.rot90(input_tensor, -3, (-1, -2))
  1152. h = mnp.rot90(input_tensor, 3, (2, 1))
  1153. return a, b, c, d, e, f, g, h
  1154. def onp_rot90(input_array):
  1155. a = onp.rot90(input_array)
  1156. b = onp.rot90(input_array, 2)
  1157. c = onp.rot90(input_array, 3)
  1158. d = onp.rot90(input_array, 4)
  1159. e = onp.rot90(input_array, 5, (0, -1))
  1160. f = onp.rot90(input_array, 1, (2, 0))
  1161. g = onp.rot90(input_array, -3, (-1, -2))
  1162. h = onp.rot90(input_array, 3, (2, 1))
  1163. return a, b, c, d, e, f, g, h
  1164. @pytest.mark.level2
  1165. @pytest.mark.platform_arm_ascend_training
  1166. @pytest.mark.platform_x86_ascend_training
  1167. @pytest.mark.platform_x86_gpu_training
  1168. @pytest.mark.platform_x86_cpu
  1169. @pytest.mark.env_onecard
  1170. def test_rot90():
  1171. onp_array = rand_int(3, 4, 5).astype('float32')
  1172. mnp_array = to_tensor(onp_array)
  1173. o_rot = onp_rot90(onp_array)
  1174. m_rot = mnp_rot90(mnp_array)
  1175. check_all_results(o_rot, m_rot)
  1176. def mnp_size(x):
  1177. a = mnp.size(x)
  1178. b = mnp.size(x, axis=0)
  1179. return a, b
  1180. def onp_size(x):
  1181. a = onp.size(x)
  1182. b = onp.size(x, axis=0)
  1183. return a, b
  1184. @pytest.mark.level1
  1185. @pytest.mark.platform_arm_ascend_training
  1186. @pytest.mark.platform_x86_ascend_training
  1187. @pytest.mark.platform_x86_gpu_training
  1188. @pytest.mark.platform_x86_cpu
  1189. @pytest.mark.env_onecard
  1190. def test_size():
  1191. onp_arr = onp.random.rand(2, 3, 4).astype('float32')
  1192. mnp_arr = to_tensor(onp_arr)
  1193. for actual, expected in zip(mnp_size(mnp_arr), onp_size(onp_arr)):
  1194. match_array(actual, expected)
  1195. def mnp_array_str(x):
  1196. return mnp.array_str(x)
  1197. def onp_array_str(x):
  1198. return onp.array_str(x)
  1199. @pytest.mark.level1
  1200. @pytest.mark.platform_arm_ascend_training
  1201. @pytest.mark.platform_x86_ascend_training
  1202. @pytest.mark.platform_x86_gpu_training
  1203. @pytest.mark.platform_x86_cpu
  1204. @pytest.mark.env_onecard
  1205. def test_array_str():
  1206. onp_arr = onp.random.rand(2, 3, 4).astype('float32')
  1207. mnp_arr = to_tensor(onp_arr)
  1208. for actual, expected in zip(mnp_size(mnp_arr), onp_size(onp_arr)):
  1209. match_array(actual, expected)
  1210. @pytest.mark.level1
  1211. @pytest.mark.platform_arm_ascend_training
  1212. @pytest.mark.platform_x86_ascend_training
  1213. @pytest.mark.platform_x86_gpu_training
  1214. @pytest.mark.platform_x86_cpu
  1215. @pytest.mark.env_onecard
  1216. def test_apply_along_axis():
  1217. onp_arr = rand_int(5, 3, 7)
  1218. mnp_arr = to_tensor(onp_arr)
  1219. for i in range(-3, 3):
  1220. mnp_res = mnp.apply_along_axis(mnp.diag, i, mnp_arr)
  1221. onp_res = onp.apply_along_axis(onp.diag, i, onp_arr)
  1222. match_all_arrays(mnp_res, onp_res)
  1223. mnp_res = mnp.apply_along_axis(lambda x: x[0], 2, mnp_arr)
  1224. onp_res = onp.apply_along_axis(lambda x: x[0], 2, onp_arr)
  1225. match_all_arrays(mnp_res, onp_res)
  1226. mnp_res = mnp.apply_along_axis(lambda x, y, offset=0: (x[4] - y)*offset, 2, mnp_arr, 1, offset=3)
  1227. onp_res = onp.apply_along_axis(lambda x, y, offset=0: (x[4] - y)*offset, 2, onp_arr, 1, offset=3)
  1228. match_all_arrays(mnp_res, onp_res)
  1229. @pytest.mark.level1
  1230. @pytest.mark.platform_arm_ascend_training
  1231. @pytest.mark.platform_x86_ascend_training
  1232. @pytest.mark.platform_x86_gpu_training
  1233. @pytest.mark.platform_x86_cpu
  1234. @pytest.mark.env_onecard
  1235. def test_tensor_resize():
  1236. x = rand_int(3, 5)
  1237. mnp_x = to_tensor(x)
  1238. x.resize(2, 4, refcheck=False)
  1239. mnp_x = mnp_x.resize(2, 4)
  1240. match_array(mnp_x.asnumpy(), x)
  1241. x.resize((3, 1), refcheck=False)
  1242. mnp_x = mnp_x.resize((3, 1))
  1243. match_array(mnp_x.asnumpy(), x)
  1244. x.resize(7, 4, refcheck=False)
  1245. mnp_x = mnp_x.resize(7, 4)
  1246. match_array(mnp_x.asnumpy(), x)
  1247. @pytest.mark.level1
  1248. @pytest.mark.platform_arm_ascend_training
  1249. @pytest.mark.platform_x86_ascend_training
  1250. @pytest.mark.platform_x86_gpu_training
  1251. @pytest.mark.platform_x86_cpu
  1252. @pytest.mark.env_onecard
  1253. def test_piecewise():
  1254. x = rand_int(2, 4)
  1255. mnp_x = to_tensor(x)
  1256. condlist = [x < 2, x == 2, x > 2]
  1257. mnp_condlist = [mnp_x < 2, mnp_x == 2, mnp_x > 2]
  1258. funclist = [lambda x, offset=0: x - offset, lambda x, offset=0: x, lambda x, offset=0: x*offset]
  1259. mnp_res = mnp.piecewise(mnp_x, mnp_condlist, funclist, offset=2)
  1260. onp_res = onp.piecewise(x, condlist, funclist, offset=2)
  1261. match_all_arrays(mnp_res, onp_res)
  1262. funclist = [-1, 0, 1]
  1263. mnp_res = mnp.piecewise(mnp_x, mnp_condlist, funclist)
  1264. onp_res = onp.piecewise(x, condlist, funclist)
  1265. match_all_arrays(mnp_res, onp_res)
  1266. condlist = [x > 10, x < 0]
  1267. mnp_x = to_tensor(x)
  1268. mnp_condlist = [mnp_x > 10, mnp_x < 0]
  1269. funclist = [lambda x: x - 2, lambda x: x - 1, lambda x: x*2]
  1270. mnp_res = mnp.piecewise(mnp_x, mnp_condlist, funclist)
  1271. onp_res = onp.piecewise(x, condlist, funclist)
  1272. match_all_arrays(mnp_res, onp_res)
  1273. x = 2
  1274. condlist = True
  1275. funclist = [lambda x: x - 1]
  1276. mnp_res = mnp.piecewise(x, condlist, funclist)
  1277. onp_res = onp.piecewise(x, condlist, funclist)
  1278. match_all_arrays(mnp_res, onp_res)
  1279. @pytest.mark.level0
  1280. @pytest.mark.platform_arm_ascend_training
  1281. @pytest.mark.platform_x86_ascend_training
  1282. @pytest.mark.platform_x86_gpu_training
  1283. @pytest.mark.platform_x86_cpu
  1284. @pytest.mark.env_onecard
  1285. def test_unravel_index():
  1286. shapes = [(2, 6, 3)]
  1287. dims = [(5, 4, 7), 5*4*7]
  1288. for shape in shapes:
  1289. x = onp.random.randint(0, 5*4*7, shape)
  1290. for dim in dims:
  1291. for order in ('C', 'F'):
  1292. mnp_res = mnp.unravel_index(to_tensor(x), dim, order=order)
  1293. onp_res = onp.unravel_index(x, dim, order=order)
  1294. match_all_arrays(mnp_res, onp_res)
  1295. def mnp_apply_over_axes(x):
  1296. a = mnp.apply_over_axes(mnp.sum, x, axes=0)
  1297. b = mnp.apply_over_axes(mnp.sum, x, axes=(0, 1))
  1298. c = mnp.apply_over_axes(mnp.std, x, axes=1)
  1299. d = mnp.apply_over_axes(mnp.mean, x, axes=(-1,))
  1300. return a, b, c, d
  1301. def onp_apply_over_axes(x):
  1302. a = onp.apply_over_axes(onp.sum, x, axes=0)
  1303. b = onp.apply_over_axes(onp.sum, x, axes=(0, 1))
  1304. c = onp.apply_over_axes(onp.std, x, axes=1)
  1305. d = onp.apply_over_axes(onp.mean, x, axes=(-1,))
  1306. return a, b, c, d
  1307. @pytest.mark.level1
  1308. @pytest.mark.platform_arm_ascend_training
  1309. @pytest.mark.platform_x86_ascend_training
  1310. @pytest.mark.platform_x86_gpu_training
  1311. @pytest.mark.platform_x86_cpu
  1312. @pytest.mark.env_onecard
  1313. def test_apply_over_axes():
  1314. arrs = [
  1315. onp.random.rand(2, 2).astype('float32'),
  1316. onp.random.rand(3, 2, 2).astype('float32'),
  1317. onp.random.rand(5, 4, 3, 3).astype('float32'),
  1318. ]
  1319. for x in arrs:
  1320. for expected, actual in zip(onp_apply_over_axes(x),
  1321. mnp_apply_over_axes(to_tensor(x))):
  1322. match_array(actual.asnumpy(), expected, error=5)
  1323. @pytest.mark.level2
  1324. @pytest.mark.platform_arm_ascend_training
  1325. @pytest.mark.platform_x86_ascend_training
  1326. @pytest.mark.platform_x86_gpu_training
  1327. @pytest.mark.platform_x86_cpu
  1328. @pytest.mark.env_onecard
  1329. def test_tensor_choose():
  1330. x = rand_int(2, 1, 4).astype(onp.int32)
  1331. mnp_x = to_tensor(x)
  1332. y = rand_int(3, 2, 5, 4).astype(onp.int32)
  1333. match_res(mnp_x.choose, x.choose, y, mode='wrap')
  1334. match_res(mnp_x.choose, x.choose, y, mode='clip')
  1335. x = rand_int(5, 3, 1, 7).astype(onp.int32)
  1336. mnp_x = to_tensor(x)
  1337. y1 = rand_int(7).astype(onp.int32)
  1338. y2 = rand_int(1, 3, 1).astype(onp.int32)
  1339. y3 = rand_int(5, 1, 1, 7).astype(onp.int32)
  1340. onp_arrays = (y1, y2, y3)
  1341. mnp_arrays = tuple(map(to_tensor, (y1, y2, y3)))
  1342. match_all_arrays(mnp_x.choose(mnp_arrays, mode='wrap'), x.choose(onp_arrays, mode='wrap'))
  1343. match_all_arrays(mnp_x.choose(mnp_arrays, mode='clip'), x.choose(onp_arrays, mode='clip'))