You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_array_ops.py 31 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """unit tests for numpy array operations"""
  16. import functools
  17. import pytest
  18. import numpy as onp
  19. import mindspore.numpy as mnp
  20. from mindspore.nn import Cell
  21. class Cases():
  22. def __init__(self):
  23. self.all_shapes = [
  24. 0, 1, 2, (), (1,), (2,), (1, 2, 3), [], [1], [2], [1, 2, 3]
  25. ]
  26. self.onp_dtypes = [onp.int32, 'int32', int,
  27. onp.float32, 'float32', float,
  28. onp.uint32, 'uint32',
  29. onp.bool_, 'bool', bool]
  30. self.mnp_dtypes = [mnp.int32, 'int32', int,
  31. mnp.float32, 'float32', float,
  32. mnp.uint32, 'uint32',
  33. mnp.bool_, 'bool', bool]
  34. self.array_sets = [1, 1.1, True, [1, 0, True], [1, 1.0, 2], (1,),
  35. [(1, 2, 3), (4, 5, 6)], onp.random.random( # pylint: disable=no-member
  36. (100, 100)).astype(onp.float32),
  37. onp.random.random((100, 100)).astype(onp.bool)]
  38. self.arrs = [
  39. rand_int(2),
  40. rand_int(2, 3),
  41. rand_int(2, 3, 4),
  42. rand_int(2, 3, 4, 5),
  43. ]
  44. # scalars expanded across the 0th dimension
  45. self.scalars = [
  46. rand_int(),
  47. rand_int(1),
  48. rand_int(1, 1),
  49. rand_int(1, 1, 1),
  50. ]
  51. # arrays of the same size expanded across the 0th dimension
  52. self.expanded_arrs = [
  53. rand_int(2, 3),
  54. rand_int(1, 2, 3),
  55. rand_int(1, 1, 2, 3),
  56. rand_int(1, 1, 1, 2, 3),
  57. ]
  58. # arrays with dimensions of size 1
  59. self.nested_arrs = [
  60. rand_int(1),
  61. rand_int(1, 2),
  62. rand_int(3, 1, 8),
  63. rand_int(1, 3, 9, 1),
  64. ]
  65. # arrays which can be broadcast
  66. self.broadcastables = [
  67. rand_int(5),
  68. rand_int(6, 1),
  69. rand_int(7, 1, 5),
  70. rand_int(8, 1, 6, 1)
  71. ]
  72. # boolean arrays which can be broadcast
  73. self.bool_broadcastables = [
  74. rand_bool(),
  75. rand_bool(1),
  76. rand_bool(5),
  77. rand_bool(6, 1),
  78. rand_bool(7, 1, 5),
  79. rand_bool(8, 1, 6, 1),
  80. ]
  81. self.mnp_prototypes = [
  82. mnp.ones((2, 3, 4)),
  83. mnp.ones((0, 3, 0, 2, 5)),
  84. onp.ones((2, 7, 0)),
  85. onp.ones(()),
  86. [mnp.ones(3), (1, 2, 3), onp.ones(3), [4, 5, 6]],
  87. ([(1, 2), mnp.ones(2)], (onp.ones(2), [3, 4])),
  88. ]
  89. self.onp_prototypes = [
  90. onp.ones((2, 3, 4)),
  91. onp.ones((0, 3, 0, 2, 5)),
  92. onp.ones((2, 7, 0)),
  93. onp.ones(()),
  94. [onp.ones(3), (1, 2, 3), onp.ones(3), [4, 5, 6]],
  95. ([(1, 2), onp.ones(2)], (onp.ones(2), [3, 4])),
  96. ]
  97. def match_array(actual, expected, error=0):
  98. if error > 0:
  99. onp.testing.assert_almost_equal(actual.tolist(), expected.tolist(),
  100. decimal=error)
  101. else:
  102. onp.testing.assert_equal(actual.tolist(), expected.tolist())
  103. def check_all_results(onp_results, mnp_results, error=0):
  104. """Check all results from numpy and mindspore.numpy"""
  105. for i, _ in enumerate(onp_results):
  106. match_array(onp_results[i], mnp_results[i].asnumpy())
  107. def run_non_kw_test(mnp_fn, onp_fn):
  108. """Run tests on functions with non keyword arguments"""
  109. test_case = Cases()
  110. for i in range(len(test_case.arrs)):
  111. arrs = test_case.arrs[:i]
  112. match_res(mnp_fn, onp_fn, *arrs)
  113. for i in range(len(test_case.scalars)):
  114. arrs = test_case.scalars[:i]
  115. match_res(mnp_fn, onp_fn, *arrs)
  116. for i in range(len(test_case.expanded_arrs)):
  117. arrs = test_case.expanded_arrs[:i]
  118. match_res(mnp_fn, onp_fn, *arrs)
  119. for i in range(len(test_case.nested_arrs)):
  120. arrs = test_case.nested_arrs[:i]
  121. match_res(mnp_fn, onp_fn, *arrs)
  122. def rand_int(*shape):
  123. """return an random integer array with parameter shape"""
  124. res = onp.random.randint(low=1, high=5, size=shape)
  125. if isinstance(res, onp.ndarray):
  126. return res.astype(onp.float32)
  127. return float(res)
  128. # return an random boolean array
  129. def rand_bool(*shape):
  130. return onp.random.rand(*shape) > 0.5
  131. def match_res(mnp_fn, onp_fn, *arrs, **kwargs):
  132. """Checks results from applying mnp_fn and onp_fn on arrs respectively"""
  133. mnp_arrs = map(functools.partial(mnp.asarray, dtype='float32'), arrs)
  134. mnp_res = mnp_fn(*mnp_arrs, **kwargs)
  135. onp_res = onp_fn(*arrs, **kwargs)
  136. match_all_arrays(mnp_res, onp_res)
  137. def match_all_arrays(mnp_res, onp_res, error=0):
  138. if isinstance(mnp_res, (tuple, list)):
  139. for actual, expected in zip(mnp_res, onp_res):
  140. match_array(actual.asnumpy(), expected, error)
  141. else:
  142. match_array(mnp_res.asnumpy(), onp_res, error)
  143. def match_meta(actual, expected):
  144. # float64 and int64 are not supported, and the defualt type for
  145. # float and int are float32 and int32, respectively
  146. if expected.dtype == onp.float64:
  147. expected = expected.astype(onp.float32)
  148. elif expected.dtype == onp.int64:
  149. expected = expected.astype(onp.int32)
  150. assert actual.shape == expected.shape
  151. assert actual.dtype == expected.dtype
  152. # Test np.transpose and np.ndarray.transpose
  153. def mnp_transpose(input_tensor):
  154. a = mnp.transpose(input_tensor, (0, 2, 1))
  155. b = mnp.transpose(input_tensor, [2, 1, 0])
  156. c = mnp.transpose(input_tensor, (1, 0, 2))
  157. d = mnp.transpose(input_tensor)
  158. return a, b, c, d
  159. def onp_transpose(input_array):
  160. a = onp.transpose(input_array, (0, 2, 1))
  161. b = onp.transpose(input_array, [2, 1, 0])
  162. c = onp.transpose(input_array, (1, 0, 2))
  163. d = onp.transpose(input_array)
  164. return a, b, c, d
  165. @pytest.mark.level1
  166. @pytest.mark.platform_arm_ascend_training
  167. @pytest.mark.platform_x86_ascend_training
  168. @pytest.mark.platform_x86_gpu_training
  169. @pytest.mark.platform_x86_cpu
  170. @pytest.mark.env_onecard
  171. def test_transpose():
  172. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  173. mnp_array = mnp.asarray(onp_array)
  174. o_transposed = onp_transpose(onp_array)
  175. m_transposed = mnp_transpose(mnp_array)
  176. check_all_results(o_transposed, m_transposed)
  177. # Test np.expand_dims
  178. def mnp_expand_dims(input_tensor):
  179. a = mnp.expand_dims(input_tensor, 0)
  180. b = mnp.expand_dims(input_tensor, -1)
  181. c = mnp.expand_dims(input_tensor, axis=2)
  182. d = mnp.expand_dims(input_tensor, axis=-2)
  183. return a, b, c, d
  184. def onp_expand_dims(input_array):
  185. a = onp.expand_dims(input_array, 0)
  186. b = onp.expand_dims(input_array, -1)
  187. c = onp.expand_dims(input_array, axis=2)
  188. d = onp.expand_dims(input_array, axis=-2)
  189. return a, b, c, d
  190. @pytest.mark.level1
  191. @pytest.mark.platform_arm_ascend_training
  192. @pytest.mark.platform_x86_ascend_training
  193. @pytest.mark.platform_x86_gpu_training
  194. @pytest.mark.platform_x86_cpu
  195. @pytest.mark.env_onecard
  196. def test_expand_dims():
  197. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  198. mnp_array = mnp.asarray(onp_array)
  199. o_expanded = onp_expand_dims(onp_array)
  200. m_expanded = mnp_expand_dims(mnp_array)
  201. check_all_results(o_expanded, m_expanded)
  202. # Test np.squeeze
  203. def mnp_squeeze(input_tensor):
  204. a = mnp.squeeze(input_tensor)
  205. b = mnp.squeeze(input_tensor, 0)
  206. c = mnp.squeeze(input_tensor, axis=None)
  207. d = mnp.squeeze(input_tensor, axis=-3)
  208. e = mnp.squeeze(input_tensor, (2,))
  209. f = mnp.squeeze(input_tensor, (0, 2))
  210. return a, b, c, d, e, f
  211. def onp_squeeze(input_array):
  212. a = onp.squeeze(input_array)
  213. b = onp.squeeze(input_array, 0)
  214. c = onp.squeeze(input_array, axis=None)
  215. d = onp.squeeze(input_array, axis=-3)
  216. e = onp.squeeze(input_array, (2,))
  217. f = onp.squeeze(input_array, (0, 2))
  218. return a, b, c, d, e, f
  219. @pytest.mark.level1
  220. @pytest.mark.platform_arm_ascend_training
  221. @pytest.mark.platform_x86_ascend_training
  222. @pytest.mark.platform_x86_gpu_training
  223. @pytest.mark.platform_x86_cpu
  224. @pytest.mark.env_onecard
  225. def test_squeeze():
  226. onp_array = onp.random.random((1, 3, 1, 4, 2)).astype('float32')
  227. mnp_array = mnp.asarray(onp_array)
  228. o_squeezed = onp_squeeze(onp_array)
  229. m_squeezed = mnp_squeeze(mnp_array)
  230. check_all_results(o_squeezed, m_squeezed)
  231. onp_array = onp.random.random((1, 1, 1, 1, 1)).astype('float32')
  232. mnp_array = mnp.asarray(onp_array)
  233. o_squeezed = onp_squeeze(onp_array)
  234. m_squeezed = mnp_squeeze(mnp_array)
  235. check_all_results(o_squeezed, m_squeezed)
  236. # Test np.rollaxis
  237. def mnp_rollaxis(input_tensor):
  238. a = mnp.rollaxis(input_tensor, 0, 1)
  239. b = mnp.rollaxis(input_tensor, 0, 2)
  240. c = mnp.rollaxis(input_tensor, 2, 1)
  241. d = mnp.rollaxis(input_tensor, 2, 2)
  242. e = mnp.rollaxis(input_tensor, 0)
  243. f = mnp.rollaxis(input_tensor, 1)
  244. return a, b, c, d, e, f
  245. def onp_rollaxis(input_array):
  246. a = onp.rollaxis(input_array, 0, 1)
  247. b = onp.rollaxis(input_array, 0, 2)
  248. c = onp.rollaxis(input_array, 2, 1)
  249. d = onp.rollaxis(input_array, 2, 2)
  250. e = onp.rollaxis(input_array, 0)
  251. f = onp.rollaxis(input_array, 1)
  252. return a, b, c, d, e, f
  253. @pytest.mark.level1
  254. @pytest.mark.platform_arm_ascend_training
  255. @pytest.mark.platform_x86_ascend_training
  256. @pytest.mark.platform_x86_gpu_training
  257. @pytest.mark.platform_x86_cpu
  258. @pytest.mark.env_onecard
  259. def test_rollaxis():
  260. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  261. mnp_array = mnp.asarray(onp_array)
  262. o_rolled = onp_rollaxis(onp_array)
  263. m_rolled = mnp_rollaxis(mnp_array)
  264. check_all_results(o_rolled, m_rolled)
  265. # Test np.swapaxes
  266. def mnp_swapaxes(input_tensor):
  267. a = mnp.swapaxes(input_tensor, 0, 1)
  268. b = mnp.swapaxes(input_tensor, 1, 0)
  269. c = mnp.swapaxes(input_tensor, 1, 1)
  270. d = mnp.swapaxes(input_tensor, 2, 1)
  271. e = mnp.swapaxes(input_tensor, 1, 2)
  272. f = mnp.swapaxes(input_tensor, 2, 2)
  273. return a, b, c, d, e, f
  274. def onp_swapaxes(input_array):
  275. a = onp.swapaxes(input_array, 0, 1)
  276. b = onp.swapaxes(input_array, 1, 0)
  277. c = onp.swapaxes(input_array, 1, 1)
  278. d = onp.swapaxes(input_array, 2, 1)
  279. e = onp.swapaxes(input_array, 1, 2)
  280. f = onp.swapaxes(input_array, 2, 2)
  281. return a, b, c, d, e, f
  282. @pytest.mark.level1
  283. @pytest.mark.platform_arm_ascend_training
  284. @pytest.mark.platform_x86_ascend_training
  285. @pytest.mark.platform_x86_gpu_training
  286. @pytest.mark.platform_x86_cpu
  287. @pytest.mark.env_onecard
  288. def test_swapaxes():
  289. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  290. mnp_array = mnp.asarray(onp_array)
  291. o_swaped = onp_swapaxes(onp_array)
  292. m_swaped = mnp_swapaxes(mnp_array)
  293. check_all_results(o_swaped, m_swaped)
  294. # Test np.reshape
  295. def mnp_reshape(input_tensor):
  296. a = mnp.reshape(input_tensor, (3, 8))
  297. b = mnp.reshape(input_tensor, [3, -1])
  298. c = mnp.reshape(input_tensor, (-1, 12))
  299. d = mnp.reshape(input_tensor, (-1,))
  300. e = mnp.reshape(input_tensor, 24)
  301. f = mnp.reshape(input_tensor, [2, 4, -1])
  302. g = input_tensor.reshape(3, 8)
  303. h = input_tensor.reshape(3, -1)
  304. i = input_tensor.reshape([-1, 3])
  305. j = input_tensor.reshape(-1)
  306. return a, b, c, d, e, f, g, h, i, j
  307. def onp_reshape(input_array):
  308. a = onp.reshape(input_array, (3, 8))
  309. b = onp.reshape(input_array, [3, -1])
  310. c = onp.reshape(input_array, (-1, 12))
  311. d = onp.reshape(input_array, (-1,))
  312. e = onp.reshape(input_array, 24)
  313. f = onp.reshape(input_array, [2, 4, -1])
  314. g = input_array.reshape(3, 8)
  315. h = input_array.reshape(3, -1)
  316. i = input_array.reshape([-1, 3])
  317. j = input_array.reshape(-1)
  318. return a, b, c, d, e, f, g, h, i, j
  319. @pytest.mark.level1
  320. @pytest.mark.platform_arm_ascend_training
  321. @pytest.mark.platform_x86_ascend_training
  322. @pytest.mark.platform_x86_gpu_training
  323. @pytest.mark.platform_x86_cpu
  324. @pytest.mark.env_onecard
  325. def test_reshape():
  326. onp_array = onp.random.random((2, 3, 4)).astype('float32')
  327. mnp_array = mnp.asarray(onp_array)
  328. o_reshaped = onp_reshape(onp_array)
  329. m_reshaped = mnp_reshape(mnp_array)
  330. check_all_results(o_reshaped, m_reshaped)
  331. # Test np.ravel
  332. def mnp_ravel(input_tensor):
  333. a = mnp.ravel(input_tensor)
  334. return a
  335. def onp_ravel(input_array):
  336. a = onp.ravel(input_array)
  337. return a
  338. @pytest.mark.level1
  339. @pytest.mark.platform_arm_ascend_training
  340. @pytest.mark.platform_x86_ascend_training
  341. @pytest.mark.platform_x86_gpu_training
  342. @pytest.mark.platform_x86_cpu
  343. @pytest.mark.env_onecard
  344. def test_ravel():
  345. onp_array = onp.random.random((2, 3, 4)).astype('float32')
  346. mnp_array = mnp.asarray(onp_array)
  347. o_ravel = onp_ravel(onp_array)
  348. m_ravel = mnp_ravel(mnp_array).asnumpy()
  349. match_array(o_ravel, m_ravel)
  350. # Test np.concatenate
  351. def mnp_concatenate(input_tensor):
  352. a = mnp.concatenate(input_tensor, None)
  353. b = mnp.concatenate(input_tensor, 0)
  354. c = mnp.concatenate(input_tensor, 1)
  355. d = mnp.concatenate(input_tensor, 2)
  356. return a, b, c, d
  357. def onp_concatenate(input_array):
  358. a = onp.concatenate(input_array, None)
  359. b = onp.concatenate(input_array, 0)
  360. c = onp.concatenate(input_array, 1)
  361. d = onp.concatenate(input_array, 2)
  362. return a, b, c, d
  363. @pytest.mark.level1
  364. @pytest.mark.platform_arm_ascend_training
  365. @pytest.mark.platform_x86_ascend_training
  366. @pytest.mark.platform_x86_gpu_training
  367. @pytest.mark.platform_x86_cpu
  368. @pytest.mark.env_onecard
  369. def test_concatenate():
  370. onp_array = onp.random.random((5, 4, 3, 2)).astype('float32')
  371. mnp_array = mnp.asarray(onp_array)
  372. o_concatenate = onp_concatenate(onp_array)
  373. m_concatenate = mnp_concatenate(mnp_array)
  374. check_all_results(o_concatenate, m_concatenate)
  375. def construct_arrays(n=1, ndim=1, axis=None, low=1, high=5):
  376. onp_array_lst = []
  377. mnp_array_lst = []
  378. shape = onp.random.randint(low=low, high=high, size=ndim)
  379. new_shape = [sh for sh in shape]
  380. while n > 0:
  381. n -= 1
  382. onp_array1 = onp.random.randint(
  383. low=low, high=high, size=shape).astype(onp.float32)
  384. onp_array_lst.append(onp_array1)
  385. mnp_array_lst.append(mnp.asarray(onp_array1))
  386. if axis is not None and axis < ndim:
  387. new_shape[axis] += onp.random.randint(2)
  388. onp_array2 = onp.random.randint(
  389. low=low, high=high, size=new_shape).astype(onp.float32)
  390. onp_array_lst.append(onp_array2)
  391. mnp_array_lst.append(mnp.asarray(onp_array2))
  392. return onp_array_lst, mnp_array_lst
  393. # Test np.xstack
  394. def prepare_array_sequences(n_lst, ndim_lst, axis=None, low=1, high=5):
  395. onp_seq_lst = []
  396. mnp_seq_lst = []
  397. for n in n_lst:
  398. for ndim in ndim_lst:
  399. onp_array_lst, mnp_array_lst = construct_arrays(
  400. n=n, ndim=ndim, axis=axis, low=low, high=high)
  401. onp_seq_lst.append(onp_array_lst)
  402. mnp_seq_lst.append(mnp_array_lst)
  403. return onp_seq_lst, mnp_seq_lst
  404. def mnp_column_stack(input_tensor):
  405. return mnp.column_stack(input_tensor)
  406. def onp_column_stack(input_array):
  407. return onp.column_stack(input_array)
  408. @pytest.mark.level1
  409. @pytest.mark.platform_arm_ascend_training
  410. @pytest.mark.platform_x86_ascend_training
  411. @pytest.mark.platform_x86_gpu_training
  412. @pytest.mark.platform_x86_cpu
  413. @pytest.mark.env_onecard
  414. def test_column_stack():
  415. onp_seq_lst, mnp_seq_lst = prepare_array_sequences(
  416. n_lst=[1, 5], ndim_lst=[1, 2, 3, 4], axis=1)
  417. for i, onp_seq in enumerate(onp_seq_lst):
  418. onp_seq = onp_seq_lst[i]
  419. mnp_seq = mnp_seq_lst[i]
  420. o_column_stack = onp_column_stack(onp_seq)
  421. m_column_stack = mnp_column_stack(mnp_seq)
  422. check_all_results(o_column_stack, m_column_stack)
  423. def mnp_hstack(input_tensor):
  424. return mnp.hstack(input_tensor)
  425. def onp_hstack(input_array):
  426. return onp.hstack(input_array)
  427. @pytest.mark.level1
  428. @pytest.mark.platform_arm_ascend_training
  429. @pytest.mark.platform_x86_ascend_training
  430. @pytest.mark.platform_x86_gpu_training
  431. @pytest.mark.platform_x86_cpu
  432. @pytest.mark.env_onecard
  433. def test_hstack():
  434. onp_seq_lst0, mnp_seq_lst0 = prepare_array_sequences(
  435. n_lst=[1, 5], ndim_lst=[2, 3, 4], axis=1)
  436. onp_seq_lst1, mnp_seq_lst1 = prepare_array_sequences(
  437. n_lst=[1, 5], ndim_lst=[1], axis=0)
  438. onp_seq_lst = onp_seq_lst0 + onp_seq_lst1
  439. mnp_seq_lst = mnp_seq_lst0 + mnp_seq_lst1
  440. for i, onp_seq in enumerate(onp_seq_lst):
  441. mnp_seq = mnp_seq_lst[i]
  442. o_hstack = onp_hstack(onp_seq)
  443. m_hstack = mnp_hstack(mnp_seq)
  444. check_all_results(o_hstack, m_hstack)
  445. def mnp_dstack(input_tensor):
  446. return mnp.dstack(input_tensor)
  447. def onp_dstack(input_array):
  448. return onp.dstack(input_array)
  449. @pytest.mark.level1
  450. @pytest.mark.platform_arm_ascend_training
  451. @pytest.mark.platform_x86_ascend_training
  452. @pytest.mark.platform_x86_gpu_training
  453. @pytest.mark.platform_x86_cpu
  454. @pytest.mark.env_onecard
  455. def test_dstack():
  456. onp_seq_lst, mnp_seq_lst = prepare_array_sequences(
  457. n_lst=[1, 5], ndim_lst=[1, 2, 3, 4], axis=2)
  458. for i, onp_seq in enumerate(onp_seq_lst):
  459. mnp_seq = mnp_seq_lst[i]
  460. o_dstack = onp_dstack(onp_seq)
  461. m_dstack = mnp_dstack(mnp_seq)
  462. check_all_results(o_dstack, m_dstack)
  463. def mnp_vstack(input_tensor):
  464. return mnp.vstack(input_tensor)
  465. def onp_vstack(input_array):
  466. return onp.vstack(input_array)
  467. @pytest.mark.level1
  468. @pytest.mark.platform_arm_ascend_training
  469. @pytest.mark.platform_x86_ascend_training
  470. @pytest.mark.platform_x86_gpu_training
  471. @pytest.mark.platform_x86_cpu
  472. @pytest.mark.env_onecard
  473. def test_vstack():
  474. onp_seq_lst0, mnp_seq_lst0 = prepare_array_sequences(
  475. n_lst=[1, 5], ndim_lst=[2, 3, 4], axis=0)
  476. onp_seq_lst1, mnp_seq_lst1 = prepare_array_sequences(
  477. n_lst=[1, 5], ndim_lst=[1])
  478. onp_seq_lst = onp_seq_lst0 + onp_seq_lst1
  479. mnp_seq_lst = mnp_seq_lst0 + mnp_seq_lst1
  480. for i, onp_seq in enumerate(onp_seq_lst):
  481. mnp_seq = mnp_seq_lst[i]
  482. o_vstack = onp_vstack(onp_seq)
  483. m_vstack = mnp_vstack(mnp_seq)
  484. check_all_results(o_vstack, m_vstack)
  485. # Test np.atleastxd
  486. def mnp_atleast1d(*arys):
  487. return mnp.atleast_1d(*arys)
  488. def onp_atleast1d(*arys):
  489. return onp.atleast_1d(*arys)
  490. def mnp_atleast2d(*arys):
  491. return mnp.atleast_2d(*arys)
  492. def onp_atleast2d(*arys):
  493. return onp.atleast_2d(*arys)
  494. def mnp_atleast3d(*arys):
  495. return mnp.atleast_3d(*arys)
  496. def onp_atleast3d(*arys):
  497. return onp.atleast_3d(*arys)
  498. @pytest.mark.level1
  499. @pytest.mark.platform_arm_ascend_training
  500. @pytest.mark.platform_x86_ascend_training
  501. @pytest.mark.platform_x86_gpu_training
  502. @pytest.mark.platform_x86_cpu
  503. @pytest.mark.env_onecard
  504. def test_atleast1d():
  505. run_non_kw_test(mnp_atleast1d, onp_atleast1d)
  506. @pytest.mark.level1
  507. @pytest.mark.platform_arm_ascend_training
  508. @pytest.mark.platform_x86_ascend_training
  509. @pytest.mark.platform_x86_gpu_training
  510. @pytest.mark.platform_x86_cpu
  511. @pytest.mark.env_onecard
  512. def test_atleast2d():
  513. run_non_kw_test(mnp_atleast2d, onp_atleast2d)
  514. @pytest.mark.level1
  515. @pytest.mark.platform_arm_ascend_training
  516. @pytest.mark.platform_x86_ascend_training
  517. @pytest.mark.platform_x86_gpu_training
  518. @pytest.mark.platform_x86_cpu
  519. @pytest.mark.env_onecard
  520. def test_atleast3d():
  521. run_non_kw_test(mnp_atleast3d, onp_atleast3d)
  522. # Test np.where
  523. def mnp_where(condition, x, y):
  524. return mnp.where(condition, x, y)
  525. def onp_where(condition, x, y):
  526. return onp.where(condition, x, y)
  527. @pytest.mark.level1
  528. @pytest.mark.platform_arm_ascend_training
  529. @pytest.mark.platform_x86_ascend_training
  530. @pytest.mark.platform_x86_gpu_training
  531. @pytest.mark.platform_x86_cpu
  532. @pytest.mark.env_onecard
  533. def test_where():
  534. test_case = Cases()
  535. for condition1 in test_case.bool_broadcastables[:2]:
  536. for x in test_case.broadcastables[:2]:
  537. for y in test_case.broadcastables[:2]:
  538. for condition2 in test_case.broadcastables[:2]:
  539. match_res(mnp_where, onp_where, condition1, x, y)
  540. match_res(mnp_where, onp_where, condition2, x, y)
  541. # Test ndarray.flatten
  542. def mnp_ndarray_flatten(input_tensor):
  543. a = input_tensor.flatten()
  544. b = input_tensor.flatten(order='F')
  545. c = input_tensor.flatten(order='C')
  546. return a, b, c
  547. def onp_ndarray_flatten(input_array):
  548. a = input_array.flatten()
  549. b = input_array.flatten(order='F')
  550. c = input_array.flatten(order='C')
  551. return a, b, c
  552. @pytest.mark.level1
  553. @pytest.mark.platform_arm_ascend_training
  554. @pytest.mark.platform_x86_ascend_training
  555. @pytest.mark.platform_x86_gpu_training
  556. @pytest.mark.platform_x86_cpu
  557. @pytest.mark.env_onecard
  558. def test_ndarray_flatten():
  559. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  560. mnp_array = mnp.asarray(onp_array)
  561. o_flatten = onp_ndarray_flatten(onp_array)
  562. m_flatten = mnp_ndarray_flatten(mnp_array)
  563. check_all_results(o_flatten, m_flatten)
  564. # Test ndarray.transpose
  565. def mnp_ndarray_transpose(input_tensor):
  566. a = input_tensor.T
  567. b = input_tensor.transpose()
  568. c = input_tensor.transpose((0, 2, 1))
  569. d = input_tensor.transpose([0, 2, 1])
  570. return a, b, c, d
  571. def onp_ndarray_transpose(input_array):
  572. a = input_array.T
  573. b = input_array.transpose()
  574. c = input_array.transpose((0, 2, 1))
  575. d = input_array.transpose([0, 2, 1])
  576. return a, b, c, d
  577. @pytest.mark.level1
  578. @pytest.mark.platform_arm_ascend_training
  579. @pytest.mark.platform_x86_ascend_training
  580. @pytest.mark.platform_x86_gpu_training
  581. @pytest.mark.platform_x86_cpu
  582. @pytest.mark.env_onecard
  583. def test_ndarray_transpose():
  584. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  585. mnp_array = mnp.asarray(onp_array)
  586. o_transposed = onp_ndarray_transpose(onp_array)
  587. m_transposed = mnp_ndarray_transpose(mnp_array)
  588. check_all_results(o_transposed, m_transposed)
  589. # Test ndarray.astype
  590. def mnp_ndarray_astype(input_tensor):
  591. a = input_tensor.astype("float16")
  592. b = input_tensor.astype(onp.float64)
  593. c = input_tensor.astype(mnp.bool_)
  594. return a, b, c
  595. def onp_ndarray_astype(input_array):
  596. a = input_array.astype("float16")
  597. b = input_array.astype(onp.float64)
  598. c = input_array.astype(onp.bool_)
  599. return a, b, c
  600. @pytest.mark.level1
  601. @pytest.mark.platform_arm_ascend_training
  602. @pytest.mark.platform_x86_ascend_training
  603. @pytest.mark.platform_x86_gpu_training
  604. @pytest.mark.platform_x86_cpu
  605. @pytest.mark.env_onecard
  606. def test_ndarray_astype():
  607. onp_array = onp.random.random((3, 4, 5)).astype('float32')
  608. mnp_array = mnp.asarray(onp_array)
  609. o_astype = onp_ndarray_astype(onp_array)
  610. m_astype = mnp_ndarray_astype(mnp_array)
  611. for arr1, arr2 in zip(o_astype, m_astype):
  612. assert arr1.dtype == arr2.asnumpy().dtype
  613. def onp_concatenate_type_promotion(onp_array1, onp_array2, onp_array3, onp_array4):
  614. o_concatenate = onp.concatenate((onp_array1,
  615. onp_array2,
  616. onp_array3,
  617. onp_array4), -1)
  618. return o_concatenate
  619. def mnp_concatenate_type_promotion(mnp_array1, mnp_array2, mnp_array3, mnp_array4):
  620. m_concatenate = mnp.concatenate([mnp_array1,
  621. mnp_array2,
  622. mnp_array3,
  623. mnp_array4], -1)
  624. return m_concatenate
  625. @pytest.mark.level1
  626. @pytest.mark.platform_arm_ascend_training
  627. @pytest.mark.platform_x86_ascend_training
  628. @pytest.mark.platform_x86_gpu_training
  629. @pytest.mark.platform_x86_cpu
  630. @pytest.mark.env_onecard
  631. def test_concatenate_type_promotion():
  632. onp_array = onp.random.random((5, 1)).astype('float32')
  633. mnp_array = mnp.asarray(onp_array)
  634. onp_array1 = onp_array.astype(onp.float16)
  635. onp_array2 = onp_array.astype(onp.bool_)
  636. onp_array3 = onp_array.astype(onp.float32)
  637. onp_array4 = onp_array.astype(onp.int32)
  638. mnp_array1 = mnp_array.astype(onp.float16)
  639. mnp_array2 = mnp_array.astype(onp.bool_)
  640. mnp_array3 = mnp_array.astype(onp.float32)
  641. mnp_array4 = mnp_array.astype(onp.int32)
  642. o_concatenate = onp_concatenate_type_promotion(
  643. onp_array1, onp_array2, onp_array3, onp_array4).astype('float32')
  644. m_concatenate = mnp_concatenate_type_promotion(
  645. mnp_array1, mnp_array2, mnp_array3, mnp_array4)
  646. check_all_results(o_concatenate, m_concatenate, error=1e-7)
  647. def mnp_stack(*arrs):
  648. a = mnp.stack(arrs, axis=-4)
  649. b = mnp.stack(arrs, axis=-3)
  650. c = mnp.stack(arrs, axis=0)
  651. d = mnp.stack(arrs, axis=3)
  652. e = mnp.stack(arrs, axis=2)
  653. return a, b, c, d, e
  654. def onp_stack(*arrs):
  655. a = onp.stack(arrs, axis=-4)
  656. b = onp.stack(arrs, axis=-3)
  657. c = onp.stack(arrs, axis=0)
  658. d = onp.stack(arrs, axis=3)
  659. e = onp.stack(arrs, axis=2)
  660. return a, b, c, d, e
  661. @pytest.mark.level1
  662. @pytest.mark.platform_arm_ascend_training
  663. @pytest.mark.platform_x86_ascend_training
  664. @pytest.mark.platform_x86_gpu_training
  665. @pytest.mark.platform_x86_cpu
  666. @pytest.mark.env_onecard
  667. def test_stack():
  668. arr = rand_int(3, 4, 5, 6)
  669. match_res(mnp.stack, onp.stack, arr)
  670. for i in range(-4, 4):
  671. match_res(mnp.stack, onp.stack, arr, axis=i)
  672. arr = rand_int(7, 4, 0, 3)
  673. match_res(mnp.stack, onp.stack, arr)
  674. for i in range(-4, 4):
  675. match_res(mnp.stack, onp.stack, arr, axis=i)
  676. arrs = [rand_int(3, 4, 5) for i in range(10)]
  677. match_res(mnp.stack, onp.stack, arrs)
  678. match_res(mnp.stack, onp.stack, tuple(arrs))
  679. match_res(mnp_stack, onp_stack, *arrs)
  680. for i in range(-4, 4):
  681. match_res(mnp.stack, onp.stack, arrs, axis=i)
  682. arrs = [rand_int(3, 0, 5, 8, 0) for i in range(5)]
  683. match_res(mnp.stack, onp.stack, arrs)
  684. match_res(mnp.stack, onp.stack, tuple(arrs))
  685. match_res(mnp_stack, onp_stack, *arrs)
  686. for i in range(-6, 6):
  687. match_res(mnp.stack, onp.stack, arrs, axis=i)
  688. class ReshapeExpandSqueeze(Cell):
  689. def __init__(self):
  690. super(ReshapeExpandSqueeze, self).__init__()
  691. def construct(self, x):
  692. x = mnp.expand_dims(x, 2)
  693. x = mnp.reshape(x, (1, 2, 3, 4, 1, 1))
  694. x = mnp.squeeze(x)
  695. return x
  696. class TransposeConcatRavel(Cell):
  697. def __init__(self):
  698. super(TransposeConcatRavel, self).__init__()
  699. def construct(self, x1, x2, x3):
  700. x1 = mnp.transpose(x1, [0, 2, 1])
  701. x2 = x2.transpose(0, 2, 1)
  702. x = mnp.concatenate((x1, x2, x3), -1)
  703. x = mnp.ravel(x)
  704. return x
  705. class RollSwap(Cell):
  706. def __init__(self):
  707. super(RollSwap, self).__init__()
  708. def construct(self, x):
  709. x = mnp.rollaxis(x, 2)
  710. x = mnp.swapaxes(x, 0, 1)
  711. return x
  712. test_case_array_ops = [
  713. ('ReshapeExpandSqueeze', {
  714. 'block': ReshapeExpandSqueeze(),
  715. 'desc_inputs': [mnp.ones((2, 3, 4))]}),
  716. ('TransposeConcatRavel', {
  717. 'block': TransposeConcatRavel(),
  718. 'desc_inputs': [mnp.ones((2, 3, 4)),
  719. mnp.ones((2, 3, 4)),
  720. mnp.ones((2, 4, 1))]}),
  721. ('RollSwap', {
  722. 'block': RollSwap(),
  723. 'desc_inputs': [mnp.ones((2, 3, 4))]})
  724. ]
  725. test_case_lists = [test_case_array_ops]
  726. test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select certain testcase
  728. # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
  729. @pytest.mark.level1
  730. @pytest.mark.platform_arm_ascend_training
  731. @pytest.mark.platform_x86_ascend_training
  732. @pytest.mark.platform_x86_gpu_training
  733. @pytest.mark.platform_x86_cpu
  734. @pytest.mark.env_onecard
  735. def test_expand_dims_exception():
  736. with pytest.raises(TypeError):
  737. mnp.expand_dims(mnp.ones((3, 3)), 1.2)
  738. @pytest.mark.level1
  739. @pytest.mark.platform_arm_ascend_training
  740. @pytest.mark.platform_x86_ascend_training
  741. @pytest.mark.platform_x86_gpu_training
  742. @pytest.mark.platform_x86_cpu
  743. @pytest.mark.env_onecard
  744. def test_swapaxes_exception():
  745. with pytest.raises(ValueError):
  746. mnp.swapaxes(mnp.ones((3, 3)), 1, 10)
  747. @pytest.mark.level1
  748. @pytest.mark.platform_arm_ascend_training
  749. @pytest.mark.platform_x86_ascend_training
  750. @pytest.mark.platform_x86_gpu_training
  751. @pytest.mark.platform_x86_cpu
  752. @pytest.mark.env_onecard
  753. def test_tensor_flatten():
  754. lst = [[1.0, 2.0], [3.0, 4.0]]
  755. tensor_list = mnp.asarray(lst)
  756. assert tensor_list.flatten().asnumpy().tolist() == [1.0, 2.0, 3.0, 4.0]
  757. assert tensor_list.flatten(order='F').asnumpy().tolist() == [
  758. 1.0, 3.0, 2.0, 4.0]
  759. @pytest.mark.level1
  760. @pytest.mark.platform_arm_ascend_training
  761. @pytest.mark.platform_x86_ascend_training
  762. @pytest.mark.platform_x86_gpu_training
  763. @pytest.mark.platform_x86_cpu
  764. @pytest.mark.env_onecard
  765. def test_tensor_reshape():
  766. lst = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
  767. tensor_list = mnp.asarray(lst)
  768. with pytest.raises(TypeError):
  769. tensor_list = tensor_list.reshape({0, 1, 2})
  770. with pytest.raises(ValueError):
  771. tensor_list = tensor_list.reshape(1, 2, 3)
  772. assert tensor_list.reshape([-1, 4]).shape == (2, 4)
  773. assert tensor_list.reshape(1, -1, 4).shape == (1, 2, 4)
  774. @pytest.mark.level1
  775. @pytest.mark.platform_arm_ascend_training
  776. @pytest.mark.platform_x86_ascend_training
  777. @pytest.mark.platform_x86_gpu_training
  778. @pytest.mark.platform_x86_cpu
  779. @pytest.mark.env_onecard
  780. def test_tensor_squeeze():
  781. lst = [[[1.0], [2.0], [3.0]]]
  782. tensor_list = mnp.asarray(lst)
  783. with pytest.raises(TypeError):
  784. tensor_list = tensor_list.squeeze(1.2)
  785. with pytest.raises(ValueError):
  786. tensor_list = tensor_list.squeeze(4)
  787. assert tensor_list.squeeze().shape == (3,)
  788. assert tensor_list.squeeze(axis=2).shape == (1, 3)
  789. @pytest.mark.level1
  790. @pytest.mark.platform_arm_ascend_training
  791. @pytest.mark.platform_x86_ascend_training
  792. @pytest.mark.platform_x86_gpu_training
  793. @pytest.mark.platform_x86_cpu
  794. @pytest.mark.env_onecard
  795. def test_tensor_ravel():
  796. lst = [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]
  797. tensor_list = mnp.asarray(lst)
  798. assert tensor_list.ravel().shape == (8,)
  799. assert tensor_list.ravel().asnumpy().tolist() == [
  800. 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
  801. @pytest.mark.level1
  802. @pytest.mark.platform_arm_ascend_training
  803. @pytest.mark.platform_x86_ascend_training
  804. @pytest.mark.platform_x86_gpu_training
  805. @pytest.mark.platform_x86_cpu
  806. @pytest.mark.env_onecard
  807. def test_tensor_swapaxes():
  808. lst = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
  809. tensor_list = mnp.asarray(lst)
  810. with pytest.raises(TypeError):
  811. tensor_list = tensor_list.swapaxes(0, (1,))
  812. with pytest.raises(ValueError):
  813. tensor_list = tensor_list.swapaxes(0, 3)
  814. assert tensor_list.swapaxes(0, 1).shape == (3, 2)