
test_array_ops.py

# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""unit tests for numpy array operations"""

import functools

import pytest
import numpy as onp

import mindspore.numpy as mnp
from mindspore.nn import Cell
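

# Shared inputs for the tests below: shape specs, dtype aliases and randomly
# generated arrays (scalars, nested/broadcastable operands and array
# prototypes) consumed by the comparison helpers.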
class Cases():
    def __init__(self):
        self.all_shapes = [
            0, 1, 2, (), (1,), (2,), (1, 2, 3), [], [1], [2], [1, 2, 3]
        ]
        self.onp_dtypes = [onp.int32, 'int32', int,
                           onp.float32, 'float32', float,
                           onp.uint32, 'uint32',
                           onp.bool_, 'bool', bool]
        self.mnp_dtypes = [mnp.int32, 'int32', int,
                           mnp.float32, 'float32', float,
                           mnp.uint32, 'uint32',
                           mnp.bool_, 'bool', bool]
        self.array_sets = [1, 1.1, True, [1, 0, True], [1, 1.0, 2], (1,),
                           [(1, 2, 3), (4, 5, 6)], onp.random.random(  # pylint: disable=no-member
                               (100, 100)).astype(onp.float32),
                           onp.random.random((100, 100)).astype(onp.bool_)]
        self.arrs = [
            rand_int(2),
            rand_int(2, 3),
            rand_int(2, 3, 4),
            rand_int(2, 3, 4, 5),
        ]

        # scalars expanded across the 0th dimension
        self.scalars = [
            rand_int(),
            rand_int(1),
            rand_int(1, 1),
            rand_int(1, 1, 1),
        ]

        # arrays of the same size expanded across the 0th dimension
        self.expanded_arrs = [
            rand_int(2, 3),
            rand_int(1, 2, 3),
            rand_int(1, 1, 2, 3),
            rand_int(1, 1, 1, 2, 3),
        ]

        # arrays with dimensions of size 1
        self.nested_arrs = [
            rand_int(1),
            rand_int(1, 2),
            rand_int(3, 1, 8),
            rand_int(1, 3, 9, 1),
        ]

        # arrays which can be broadcast
        self.broadcastables = [
            rand_int(5),
            rand_int(6, 1),
            rand_int(7, 1, 5),
            rand_int(8, 1, 6, 1)
        ]

        # boolean arrays which can be broadcast
        self.bool_broadcastables = [
            rand_bool(),
            rand_bool(1),
            rand_bool(5),
            rand_bool(6, 1),
            rand_bool(7, 1, 5),
            rand_bool(8, 1, 6, 1),
        ]

        self.mnp_prototypes = [
            mnp.ones((2, 3, 4)),
            mnp.ones((0, 3, 0, 2, 5)),
            onp.ones((2, 7, 0)),
            onp.ones(()),
            [mnp.ones(3), (1, 2, 3), onp.ones(3), [4, 5, 6]],
            ([(1, 2), mnp.ones(2)], (onp.ones(2), [3, 4])),
        ]

        self.onp_prototypes = [
            onp.ones((2, 3, 4)),
            onp.ones((0, 3, 0, 2, 5)),
            onp.ones((2, 7, 0)),
            onp.ones(()),
            [onp.ones(3), (1, 2, 3), onp.ones(3), [4, 5, 6]],
            ([(1, 2), onp.ones(2)], (onp.ones(2), [3, 4])),
        ]


def match_array(actual, expected, error=0):
    if error > 0:
        onp.testing.assert_almost_equal(actual.tolist(), expected.tolist(),
                                        decimal=error)
    else:
        onp.testing.assert_equal(actual.tolist(), expected.tolist())


def check_all_results(onp_results, mnp_results, error=0):
    """Check all results from numpy and mindspore.numpy"""
    for i, _ in enumerate(onp_results):
        match_array(onp_results[i], mnp_results[i].asnumpy())


def run_non_kw_test(mnp_fn, onp_fn):
    """Run tests on functions with non-keyword arguments"""
    test_case = Cases()
    for i in range(len(test_case.arrs)):
        arrs = test_case.arrs[:i]
        match_res(mnp_fn, onp_fn, *arrs)

    for i in range(len(test_case.scalars)):
        arrs = test_case.scalars[:i]
        match_res(mnp_fn, onp_fn, *arrs)

    for i in range(len(test_case.expanded_arrs)):
        arrs = test_case.expanded_arrs[:i]
        match_res(mnp_fn, onp_fn, *arrs)

    for i in range(len(test_case.nested_arrs)):
        arrs = test_case.nested_arrs[:i]
        match_res(mnp_fn, onp_fn, *arrs)


def rand_int(*shape):
    """return a random integer array with the given shape"""
    res = onp.random.randint(low=1, high=5, size=shape)
    if isinstance(res, onp.ndarray):
        return res.astype(onp.float32)
    return float(res)


# return a random boolean array
def rand_bool(*shape):
    return onp.random.rand(*shape) > 0.5


def match_res(mnp_fn, onp_fn, *arrs, **kwargs):
    """Checks results from applying mnp_fn and onp_fn to arrs respectively"""
    mnp_arrs = map(functools.partial(mnp.asarray, dtype='float32'), arrs)
    mnp_res = mnp_fn(*mnp_arrs, **kwargs)
    onp_res = onp_fn(*arrs, **kwargs)
    match_all_arrays(mnp_res, onp_res)


def match_all_arrays(mnp_res, onp_res, error=0):
    if isinstance(mnp_res, (tuple, list)):
        assert len(mnp_res) == len(onp_res)
        for actual, expected in zip(mnp_res, onp_res):
            match_array(actual.asnumpy(), expected, error)
    else:
        match_array(mnp_res.asnumpy(), onp_res, error)


def match_meta(actual, expected):
    # float64 and int64 are not supported, and the default types for
    # float and int are float32 and int32, respectively
    if expected.dtype == onp.float64:
        expected = expected.astype(onp.float32)
    elif expected.dtype == onp.int64:
        expected = expected.astype(onp.int32)
    assert actual.shape == expected.shape
    assert actual.dtype == expected.dtype
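

# Pattern used by the tests below: each operation has an mnp_* driver and an
# onp_* driver that call mindspore.numpy and numpy with identical arguments;
# a test builds a random numpy array, mirrors it with mnp.asarray, runs both
# drivers and compares the outputs via check_all_results/match_array.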


# Test np.transpose and np.ndarray.transpose
def mnp_transpose(input_tensor):
    a = mnp.transpose(input_tensor, (0, 2, 1))
    b = mnp.transpose(input_tensor, [2, 1, 0])
    c = mnp.transpose(input_tensor, (1, 0, 2))
    d = mnp.transpose(input_tensor)
    return a, b, c, d


def onp_transpose(input_array):
    a = onp.transpose(input_array, (0, 2, 1))
    b = onp.transpose(input_array, [2, 1, 0])
    c = onp.transpose(input_array, (1, 0, 2))
    d = onp.transpose(input_array)
    return a, b, c, d


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_transpose():
    onp_array = onp.random.random((3, 4, 5)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_transposed = onp_transpose(onp_array)
    m_transposed = mnp_transpose(mnp_array)
    check_all_results(o_transposed, m_transposed)


# Test np.expand_dims
def mnp_expand_dims(input_tensor):
    a = mnp.expand_dims(input_tensor, 0)
    b = mnp.expand_dims(input_tensor, -1)
    c = mnp.expand_dims(input_tensor, axis=2)
    d = mnp.expand_dims(input_tensor, axis=-2)
    return a, b, c, d


def onp_expand_dims(input_array):
    a = onp.expand_dims(input_array, 0)
    b = onp.expand_dims(input_array, -1)
    c = onp.expand_dims(input_array, axis=2)
    d = onp.expand_dims(input_array, axis=-2)
    return a, b, c, d


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_expand_dims():
    onp_array = onp.random.random((3, 4, 5)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_expanded = onp_expand_dims(onp_array)
    m_expanded = mnp_expand_dims(mnp_array)
    check_all_results(o_expanded, m_expanded)


# Test np.squeeze
def mnp_squeeze(input_tensor):
    a = mnp.squeeze(input_tensor)
    b = mnp.squeeze(input_tensor, 0)
    c = mnp.squeeze(input_tensor, axis=None)
    d = mnp.squeeze(input_tensor, axis=-3)
    e = mnp.squeeze(input_tensor, (2,))
    f = mnp.squeeze(input_tensor, (0, 2))
    return a, b, c, d, e, f


def onp_squeeze(input_array):
    a = onp.squeeze(input_array)
    b = onp.squeeze(input_array, 0)
    c = onp.squeeze(input_array, axis=None)
    d = onp.squeeze(input_array, axis=-3)
    e = onp.squeeze(input_array, (2,))
    f = onp.squeeze(input_array, (0, 2))
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_squeeze():
    onp_array = onp.random.random((1, 3, 1, 4, 2)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_squeezed = onp_squeeze(onp_array)
    m_squeezed = mnp_squeeze(mnp_array)
    check_all_results(o_squeezed, m_squeezed)

    onp_array = onp.random.random((1, 1, 1, 1, 1)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_squeezed = onp_squeeze(onp_array)
    m_squeezed = mnp_squeeze(mnp_array)
    check_all_results(o_squeezed, m_squeezed)


# Test np.rollaxis
def mnp_rollaxis(input_tensor):
    a = mnp.rollaxis(input_tensor, 0, 1)
    b = mnp.rollaxis(input_tensor, 0, 2)
    c = mnp.rollaxis(input_tensor, 2, 1)
    d = mnp.rollaxis(input_tensor, 2, 2)
    e = mnp.rollaxis(input_tensor, 0)
    f = mnp.rollaxis(input_tensor, 1)
    return a, b, c, d, e, f


def onp_rollaxis(input_array):
    a = onp.rollaxis(input_array, 0, 1)
    b = onp.rollaxis(input_array, 0, 2)
    c = onp.rollaxis(input_array, 2, 1)
    d = onp.rollaxis(input_array, 2, 2)
    e = onp.rollaxis(input_array, 0)
    f = onp.rollaxis(input_array, 1)
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_rollaxis():
    onp_array = onp.random.random((3, 4, 5)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_rolled = onp_rollaxis(onp_array)
    m_rolled = mnp_rollaxis(mnp_array)
    check_all_results(o_rolled, m_rolled)


# Test np.swapaxes
def mnp_swapaxes(input_tensor):
    a = mnp.swapaxes(input_tensor, 0, 1)
    b = mnp.swapaxes(input_tensor, 1, 0)
    c = mnp.swapaxes(input_tensor, 1, 1)
    d = mnp.swapaxes(input_tensor, 2, 1)
    e = mnp.swapaxes(input_tensor, 1, 2)
    f = mnp.swapaxes(input_tensor, 2, 2)
    return a, b, c, d, e, f


def onp_swapaxes(input_array):
    a = onp.swapaxes(input_array, 0, 1)
    b = onp.swapaxes(input_array, 1, 0)
    c = onp.swapaxes(input_array, 1, 1)
    d = onp.swapaxes(input_array, 2, 1)
    e = onp.swapaxes(input_array, 1, 2)
    f = onp.swapaxes(input_array, 2, 2)
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_swapaxes():
    onp_array = onp.random.random((3, 4, 5)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_swapped = onp_swapaxes(onp_array)
    m_swapped = mnp_swapaxes(mnp_array)
    check_all_results(o_swapped, m_swapped)


# Test np.reshape
def mnp_reshape(input_tensor):
    a = mnp.reshape(input_tensor, (3, 8))
    b = mnp.reshape(input_tensor, [3, -1])
    c = mnp.reshape(input_tensor, (-1, 12))
    d = mnp.reshape(input_tensor, (-1,))
    e = mnp.reshape(input_tensor, 24)
    f = mnp.reshape(input_tensor, [2, 4, -1])
    g = input_tensor.reshape(3, 8)
    h = input_tensor.reshape(3, -1)
    i = input_tensor.reshape([-1, 3])
    j = input_tensor.reshape(-1)
    return a, b, c, d, e, f, g, h, i, j


def onp_reshape(input_array):
    a = onp.reshape(input_array, (3, 8))
    b = onp.reshape(input_array, [3, -1])
    c = onp.reshape(input_array, (-1, 12))
    d = onp.reshape(input_array, (-1,))
    e = onp.reshape(input_array, 24)
    f = onp.reshape(input_array, [2, 4, -1])
    g = input_array.reshape(3, 8)
    h = input_array.reshape(3, -1)
    i = input_array.reshape([-1, 3])
    j = input_array.reshape(-1)
    return a, b, c, d, e, f, g, h, i, j


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_reshape():
    onp_array = onp.random.random((2, 3, 4)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_reshaped = onp_reshape(onp_array)
    m_reshaped = mnp_reshape(mnp_array)
    check_all_results(o_reshaped, m_reshaped)


# Test np.ravel
def mnp_ravel(input_tensor):
    a = mnp.ravel(input_tensor)
    return a


def onp_ravel(input_array):
    a = onp.ravel(input_array)
    return a


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ravel():
    onp_array = onp.random.random((2, 3, 4)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_ravel = onp_ravel(onp_array)
    m_ravel = mnp_ravel(mnp_array).asnumpy()
    match_array(o_ravel, m_ravel)


# Test np.concatenate
def mnp_concatenate(input_tensor):
    a = mnp.concatenate(input_tensor, None)
    b = mnp.concatenate(input_tensor, 0)
    c = mnp.concatenate(input_tensor, 1)
    d = mnp.concatenate(input_tensor, 2)
    return a, b, c, d


def onp_concatenate(input_array):
    a = onp.concatenate(input_array, None)
    b = onp.concatenate(input_array, 0)
    c = onp.concatenate(input_array, 1)
    d = onp.concatenate(input_array, 2)
    return a, b, c, d


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_concatenate():
    onp_array = onp.random.random((5, 4, 3, 2)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_concatenate = onp_concatenate(onp_array)
    m_concatenate = mnp_concatenate(mnp_array)
    check_all_results(o_concatenate, m_concatenate)


def construct_arrays(n=1, ndim=1, axis=None, low=1, high=5):
    onp_array_lst = []
    mnp_array_lst = []
    shape = onp.random.randint(low=low, high=high, size=ndim)
    new_shape = [sh for sh in shape]
    while n > 0:
        n -= 1
        onp_array1 = onp.random.randint(
            low=low, high=high, size=shape).astype(onp.float32)
        onp_array_lst.append(onp_array1)
        mnp_array_lst.append(mnp.asarray(onp_array1))
        if axis is not None and axis < ndim:
            new_shape[axis] += onp.random.randint(2)
            onp_array2 = onp.random.randint(
                low=low, high=high, size=new_shape).astype(onp.float32)
            onp_array_lst.append(onp_array2)
            mnp_array_lst.append(mnp.asarray(onp_array2))
    return onp_array_lst, mnp_array_lst
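

# construct_arrays (above) and prepare_array_sequences (below) build random
# array sequences that share a common shape except, optionally, along `axis`,
# which is the kind of input the column_stack/hstack/dstack/vstack tests need.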


# Test np.column_stack, np.hstack, np.dstack and np.vstack
def prepare_array_sequences(n_lst, ndim_lst, axis=None, low=1, high=5):
    onp_seq_lst = []
    mnp_seq_lst = []
    for n in n_lst:
        for ndim in ndim_lst:
            onp_array_lst, mnp_array_lst = construct_arrays(
                n=n, ndim=ndim, axis=axis, low=low, high=high)
            onp_seq_lst.append(onp_array_lst)
            mnp_seq_lst.append(mnp_array_lst)
    return onp_seq_lst, mnp_seq_lst


def mnp_column_stack(input_tensor):
    return mnp.column_stack(input_tensor)


def onp_column_stack(input_array):
    return onp.column_stack(input_array)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_column_stack():
    onp_seq_lst, mnp_seq_lst = prepare_array_sequences(
        n_lst=[1, 5], ndim_lst=[1, 2, 3, 4], axis=1)
    for i, onp_seq in enumerate(onp_seq_lst):
        mnp_seq = mnp_seq_lst[i]
        o_column_stack = onp_column_stack(onp_seq)
        m_column_stack = mnp_column_stack(mnp_seq)
        check_all_results(o_column_stack, m_column_stack)


def mnp_hstack(input_tensor):
    return mnp.hstack(input_tensor)


def onp_hstack(input_array):
    return onp.hstack(input_array)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_hstack():
    onp_seq_lst0, mnp_seq_lst0 = prepare_array_sequences(
        n_lst=[1, 5], ndim_lst=[2, 3, 4], axis=1)
    onp_seq_lst1, mnp_seq_lst1 = prepare_array_sequences(
        n_lst=[1, 5], ndim_lst=[1], axis=0)
    onp_seq_lst = onp_seq_lst0 + onp_seq_lst1
    mnp_seq_lst = mnp_seq_lst0 + mnp_seq_lst1
    for i, onp_seq in enumerate(onp_seq_lst):
        mnp_seq = mnp_seq_lst[i]
        o_hstack = onp_hstack(onp_seq)
        m_hstack = mnp_hstack(mnp_seq)
        check_all_results(o_hstack, m_hstack)


def mnp_dstack(input_tensor):
    return mnp.dstack(input_tensor)


def onp_dstack(input_array):
    return onp.dstack(input_array)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dstack():
    onp_seq_lst, mnp_seq_lst = prepare_array_sequences(
        n_lst=[1, 5], ndim_lst=[1, 2, 3, 4], axis=2)
    for i, onp_seq in enumerate(onp_seq_lst):
        mnp_seq = mnp_seq_lst[i]
        o_dstack = onp_dstack(onp_seq)
        m_dstack = mnp_dstack(mnp_seq)
        check_all_results(o_dstack, m_dstack)


def mnp_vstack(input_tensor):
    return mnp.vstack(input_tensor)


def onp_vstack(input_array):
    return onp.vstack(input_array)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_vstack():
    onp_seq_lst0, mnp_seq_lst0 = prepare_array_sequences(
        n_lst=[1, 5], ndim_lst=[2, 3, 4], axis=0)
    onp_seq_lst1, mnp_seq_lst1 = prepare_array_sequences(
        n_lst=[1, 5], ndim_lst=[1])
    onp_seq_lst = onp_seq_lst0 + onp_seq_lst1
    mnp_seq_lst = mnp_seq_lst0 + mnp_seq_lst1
    for i, onp_seq in enumerate(onp_seq_lst):
        mnp_seq = mnp_seq_lst[i]
        o_vstack = onp_vstack(onp_seq)
        m_vstack = mnp_vstack(mnp_seq)
        check_all_results(o_vstack, m_vstack)


# Test np.atleast_1d, np.atleast_2d and np.atleast_3d
def mnp_atleast1d(*arys):
    return mnp.atleast_1d(*arys)


def onp_atleast1d(*arys):
    return onp.atleast_1d(*arys)


def mnp_atleast2d(*arys):
    return mnp.atleast_2d(*arys)


def onp_atleast2d(*arys):
    return onp.atleast_2d(*arys)


def mnp_atleast3d(*arys):
    return mnp.atleast_3d(*arys)


def onp_atleast3d(*arys):
    return onp.atleast_3d(*arys)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_atleast1d():
    run_non_kw_test(mnp_atleast1d, onp_atleast1d)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_atleast2d():
    run_non_kw_test(mnp_atleast2d, onp_atleast2d)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_atleast3d():
    run_non_kw_test(mnp_atleast3d, onp_atleast3d)


# Test np.where
def mnp_where(condition, x, y):
    return mnp.where(condition, x, y)


def onp_where(condition, x, y):
    return onp.where(condition, x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_where():
    test_case = Cases()
    for condition1 in test_case.bool_broadcastables[:2]:
        for x in test_case.broadcastables[:2]:
            for y in test_case.broadcastables[:2]:
                for condition2 in test_case.broadcastables[:2]:
                    match_res(mnp_where, onp_where, condition1, x, y)
                    match_res(mnp_where, onp_where, condition2, x, y)


# Test ndarray.flatten
def mnp_ndarray_flatten(input_tensor):
    a = input_tensor.flatten()
    b = input_tensor.flatten(order='F')
    c = input_tensor.flatten(order='C')
    return a, b, c


def onp_ndarray_flatten(input_array):
    a = input_array.flatten()
    b = input_array.flatten(order='F')
    c = input_array.flatten(order='C')
    return a, b, c


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ndarray_flatten():
    onp_array = onp.random.random((3, 4, 5)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_flatten = onp_ndarray_flatten(onp_array)
    m_flatten = mnp_ndarray_flatten(mnp_array)
    check_all_results(o_flatten, m_flatten)


# Test ndarray.transpose
def mnp_ndarray_transpose(input_tensor):
    a = input_tensor.T
    b = input_tensor.transpose()
    c = input_tensor.transpose((0, 2, 1))
    d = input_tensor.transpose([0, 2, 1])
    return a, b, c, d


def onp_ndarray_transpose(input_array):
    a = input_array.T
    b = input_array.transpose()
    c = input_array.transpose((0, 2, 1))
    d = input_array.transpose([0, 2, 1])
    return a, b, c, d


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ndarray_transpose():
    onp_array = onp.random.random((3, 4, 5)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_transposed = onp_ndarray_transpose(onp_array)
    m_transposed = mnp_ndarray_transpose(mnp_array)
    check_all_results(o_transposed, m_transposed)


# Test ndarray.astype
def mnp_ndarray_astype(input_tensor):
    a = input_tensor.astype("float16")
    b = input_tensor.astype(onp.float64)
    c = input_tensor.astype(mnp.bool_)
    return a, b, c


def onp_ndarray_astype(input_array):
    a = input_array.astype("float16")
    b = input_array.astype(onp.float64)
    c = input_array.astype(onp.bool_)
    return a, b, c


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ndarray_astype():
    onp_array = onp.random.random((3, 4, 5)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    o_astype = onp_ndarray_astype(onp_array)
    m_astype = mnp_ndarray_astype(mnp_array)
    for arr1, arr2 in zip(o_astype, m_astype):
        assert arr1.dtype == arr2.asnumpy().dtype


def onp_concatenate_type_promotion(onp_array1, onp_array2, onp_array3, onp_array4):
    o_concatenate = onp.concatenate((onp_array1,
                                     onp_array2,
                                     onp_array3,
                                     onp_array4), -1)
    return o_concatenate


def mnp_concatenate_type_promotion(mnp_array1, mnp_array2, mnp_array3, mnp_array4):
    m_concatenate = mnp.concatenate([mnp_array1,
                                     mnp_array2,
                                     mnp_array3,
                                     mnp_array4], -1)
    return m_concatenate


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_concatenate_type_promotion():
    onp_array = onp.random.random((5, 1)).astype('float32')
    mnp_array = mnp.asarray(onp_array)
    onp_array1 = onp_array.astype(onp.float16)
    onp_array2 = onp_array.astype(onp.bool_)
    onp_array3 = onp_array.astype(onp.float32)
    onp_array4 = onp_array.astype(onp.int32)
    mnp_array1 = mnp_array.astype(onp.float16)
    mnp_array2 = mnp_array.astype(onp.bool_)
    mnp_array3 = mnp_array.astype(onp.float32)
    mnp_array4 = mnp_array.astype(onp.int32)
    o_concatenate = onp_concatenate_type_promotion(
        onp_array1, onp_array2, onp_array3, onp_array4).astype('float32')
    m_concatenate = mnp_concatenate_type_promotion(
        mnp_array1, mnp_array2, mnp_array3, mnp_array4)
    check_all_results(o_concatenate, m_concatenate, error=1e-7)


def mnp_stack(*arrs):
    a = mnp.stack(arrs, axis=-4)
    b = mnp.stack(arrs, axis=-3)
    c = mnp.stack(arrs, axis=0)
    d = mnp.stack(arrs, axis=3)
    e = mnp.stack(arrs, axis=2)
    return a, b, c, d, e


def onp_stack(*arrs):
    a = onp.stack(arrs, axis=-4)
    b = onp.stack(arrs, axis=-3)
    c = onp.stack(arrs, axis=0)
    d = onp.stack(arrs, axis=3)
    e = onp.stack(arrs, axis=2)
    return a, b, c, d, e


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_stack():
    arr = rand_int(3, 4, 5, 6)
    match_res(mnp.stack, onp.stack, arr)
    for i in range(-4, 4):
        match_res(mnp.stack, onp.stack, arr, axis=i)

    arr = rand_int(7, 4, 0, 3)
    match_res(mnp.stack, onp.stack, arr)
    for i in range(-4, 4):
        match_res(mnp.stack, onp.stack, arr, axis=i)

    arrs = [rand_int(3, 4, 5) for i in range(10)]
    match_res(mnp.stack, onp.stack, arrs)
    match_res(mnp.stack, onp.stack, tuple(arrs))
    match_res(mnp_stack, onp_stack, *arrs)
    for i in range(-4, 4):
        match_res(mnp.stack, onp.stack, arrs, axis=i)

    arrs = [rand_int(3, 0, 5, 8, 0) for i in range(5)]
    match_res(mnp.stack, onp.stack, arrs)
    match_res(mnp.stack, onp.stack, tuple(arrs))
    match_res(mnp_stack, onp_stack, *arrs)
    for i in range(-6, 6):
        match_res(mnp.stack, onp.stack, arrs, axis=i)
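

# The Cell subclasses below chain several mindspore.numpy ops inside
# construct(), so the same operations can also be exercised through nn.Cell
# execution; they are collected in test_case_array_ops / test_exec_case.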


class ReshapeExpandSqueeze(Cell):
    def __init__(self):
        super(ReshapeExpandSqueeze, self).__init__()

    def construct(self, x):
        x = mnp.expand_dims(x, 2)
        x = mnp.reshape(x, (1, 2, 3, 4, 1, 1))
        x = mnp.squeeze(x)
        return x


class TransposeConcatRavel(Cell):
    def __init__(self):
        super(TransposeConcatRavel, self).__init__()

    def construct(self, x1, x2, x3):
        x1 = mnp.transpose(x1, [0, 2, 1])
        x2 = x2.transpose(0, 2, 1)
        x = mnp.concatenate((x1, x2, x3), -1)
        x = mnp.ravel(x)
        return x


class RollSwap(Cell):
    def __init__(self):
        super(RollSwap, self).__init__()

    def construct(self, x):
        x = mnp.rollaxis(x, 2)
        x = mnp.swapaxes(x, 0, 1)
        return x


test_case_array_ops = [
    ('ReshapeExpandSqueeze', {
        'block': ReshapeExpandSqueeze(),
        'desc_inputs': [mnp.ones((2, 3, 4))]}),
    ('TransposeConcatRavel', {
        'block': TransposeConcatRavel(),
        'desc_inputs': [mnp.ones((2, 3, 4)),
                        mnp.ones((2, 3, 4)),
                        mnp.ones((2, 4, 1))]}),
    ('RollSwap', {
        'block': RollSwap(),
        'desc_inputs': [mnp.ones((2, 3, 4))]})
]

test_case_lists = [test_case_array_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select certain testcases
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_expand_dims_exception():
    with pytest.raises(TypeError):
        mnp.expand_dims(mnp.ones((3, 3)), 1.2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_swapaxes_exception():
    with pytest.raises(ValueError):
        mnp.swapaxes(mnp.ones((3, 3)), 1, 10)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensor_flatten():
    lst = [[1.0, 2.0], [3.0, 4.0]]
    tensor_list = mnp.asarray(lst)
    assert tensor_list.flatten().asnumpy().tolist() == [1.0, 2.0, 3.0, 4.0]
    assert tensor_list.flatten(order='F').asnumpy().tolist() == [
        1.0, 3.0, 2.0, 4.0]


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensor_reshape():
    lst = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
    tensor_list = mnp.asarray(lst)
    with pytest.raises(TypeError):
        tensor_list = tensor_list.reshape({0, 1, 2})
    with pytest.raises(ValueError):
        tensor_list = tensor_list.reshape(1, 2, 3)
    assert tensor_list.reshape([-1, 4]).shape == (2, 4)
    assert tensor_list.reshape(1, -1, 4).shape == (1, 2, 4)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensor_squeeze():
    lst = [[[1.0], [2.0], [3.0]]]
    tensor_list = mnp.asarray(lst)
    with pytest.raises(TypeError):
        tensor_list = tensor_list.squeeze(1.2)
    with pytest.raises(ValueError):
        tensor_list = tensor_list.squeeze(4)
    assert tensor_list.squeeze().shape == (3,)
    assert tensor_list.squeeze(axis=2).shape == (1, 3)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensor_ravel():
    lst = [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]
    tensor_list = mnp.asarray(lst)
    assert tensor_list.ravel().shape == (8,)
    assert tensor_list.ravel().asnumpy().tolist() == [
        1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensor_swapaxes():
    lst = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    tensor_list = mnp.asarray(lst)
    with pytest.raises(TypeError):
        tensor_list = tensor_list.swapaxes(0, (1,))
    with pytest.raises(ValueError):
        tensor_list = tensor_list.swapaxes(0, 3)
    assert tensor_list.swapaxes(0, 1).shape == (3, 2)