You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_math_ops.py 32 kB

4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """unit tests for numpy math operations"""
  16. import pytest
  17. import numpy as onp
  18. import mindspore.numpy as mnp
  19. from .utils import rand_int, rand_bool, run_binop_test, run_unary_test, run_multi_test, \
  20. run_single_test, match_res, match_array, match_meta
class Cases():
    """Shared random-array fixtures used by the math-op tests in this file.

    Note: the rand_int/rand_bool calls draw from a shared RNG, so the
    construction order below is kept as-is for reproducibility.
    """

    def __init__(self):
        # 1-D to 3-D random integer arrays
        self.arrs = [
            rand_int(2),
            rand_int(2, 3),
            rand_int(2, 3, 4),
        ]

        # scalars expanded across the 0th dimension
        self.scalars = [
            rand_int(),
            rand_int(1),
            rand_int(1, 1),
        ]

        # empty arrays
        self.empty_arrs = [
            rand_int(0),
            rand_int(4, 0),
            rand_int(2, 0, 2),
        ]

        # arrays of the same size expanded across the 0th dimension
        self.expanded_arrs = [
            rand_int(2, 3),
            rand_int(1, 2, 3),
            rand_int(1, 1, 2, 3),
        ]

        # arrays with last dimension aligned
        self.aligned_arrs = [
            rand_int(2, 3),
            rand_int(1, 4, 3),
            rand_int(5, 1, 2, 3),
            rand_int(4, 2, 1, 1, 3),
        ]

        # arrays which can be broadcast
        self.broadcastables = [
            rand_int(5),
            rand_int(6, 1),
            rand_int(7, 1, 5),
        ]

        # boolean arrays which can be broadcast
        self.bool_broadcastables = [
            rand_bool(),
            rand_bool(1),
            rand_bool(5),
            rand_bool(6, 1),
            rand_bool(7, 1, 5),
            rand_bool(8, 1, 6, 1),
        ]

        # core dimension 0 is matched for each
        # pair of array[i] and array[i + 1]
        self.core_broadcastables = [
            rand_int(3),
            rand_int(3),
            rand_int(6),
            rand_int(6, 4),
            rand_int(5, 2),
            rand_int(2),
            rand_int(2, 9),
            rand_int(9, 8),
            rand_int(6),
            rand_int(2, 6, 5),
            rand_int(9, 2, 7),
            rand_int(7),
            rand_int(5, 2, 4),
            rand_int(6, 1, 4, 9),
            rand_int(7, 1, 5, 3, 2),
            rand_int(8, 1, 6, 1, 2, 9),
        ]

        # arrays with dimensions of size 1
        self.nested_arrs = [
            rand_int(1),
            rand_int(1, 2),
            rand_int(3, 1, 8),
            rand_int(1, 3, 9, 1),
        ]

# module-level fixture instance shared by every test below
test_case = Cases()
  96. def mnp_add(x1, x2):
  97. return mnp.add(x1, x2)
  98. def onp_add(x1, x2):
  99. return onp.add(x1, x2)
  100. def mnp_subtract(x1, x2):
  101. return mnp.subtract(x1, x2)
  102. def onp_subtract(x1, x2):
  103. return onp.subtract(x1, x2)
  104. def mnp_mutiply(x1, x2):
  105. return mnp.multiply(x1, x2)
  106. def onp_multiply(x1, x2):
  107. return onp.multiply(x1, x2)
  108. def mnp_divide(x1, x2):
  109. return mnp.divide(x1, x2)
  110. def onp_divide(x1, x2):
  111. return onp.divide(x1, x2)
  112. def mnp_true_divide(x1, x2):
  113. return mnp.true_divide(x1, x2)
  114. def onp_true_divide(x1, x2):
  115. return onp.true_divide(x1, x2)
  116. def mnp_power(x1, x2):
  117. return mnp.power(x1, x2)
  118. def onp_power(x1, x2):
  119. return onp.power(x1, x2)
  120. def mnp_float_power(x1, x2):
  121. return mnp.float_power(x1, x2)
  122. def onp_float_power(x1, x2):
  123. return onp.float_power(x1, x2)
  124. def mnp_minimum(a, b):
  125. return mnp.minimum(a, b)
  126. def onp_minimum(a, b):
  127. return onp.minimum(a, b)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add():
    """Check mnp.add against onp.add on the shared fixtures."""
    run_binop_test(mnp_add, onp_add, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_subtract():
    """Check mnp.subtract against onp.subtract on the shared fixtures."""
    run_binop_test(mnp_subtract, onp_subtract, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multiply():
    """Check mnp.multiply against onp.multiply on the shared fixtures."""
    run_binop_test(mnp_mutiply, onp_multiply, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_divide():
    """Check mnp.divide against onp.divide on the shared fixtures."""
    run_binop_test(mnp_divide, onp_divide, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_true_divide():
    """Check mnp.true_divide against onp.true_divide on the shared fixtures."""
    run_binop_test(mnp_true_divide, onp_true_divide, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_power():
    """Check mnp.power against onp.power (float tolerance 1e-5)."""
    run_binop_test(mnp_power, onp_power, test_case, error=1e-5)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_float_power():
    """Check mnp.float_power against onp.float_power (tolerance 1e-5)."""
    run_binop_test(mnp_float_power, onp_float_power, test_case, error=1e-5)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_minimum():
    """Check mnp.minimum against onp.minimum on the shared fixtures."""
    run_binop_test(mnp_minimum, onp_minimum, test_case)
  192. def mnp_add_kwargs(x, y, where=None, out=None):
  193. return mnp.add(x, y, where=where, out=out)
  194. def onp_add_kwargs(x, y, where=None, out=None):
  195. return onp.add(x, y, where=where, out=out)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_kwargs():
    """Check mnp.add with where/out kwargs over broadcastable fixtures."""
    for where in test_case.bool_broadcastables[:2]:
        for x in test_case.broadcastables[:2]:
            for y in test_case.broadcastables[:2]:
                # out must have the broadcast shape of (where, x, y)
                shape_out = onp.broadcast(where, x, y).shape
                out = rand_int(*shape_out)
                match_res(mnp_add_kwargs, onp_add_kwargs, x, y, where, out)
  209. def mnp_tensordot(x, y):
  210. a = mnp.tensordot(x, y)
  211. b = mnp.tensordot(x, y, axes=0)
  212. c = mnp.tensordot(x, y, axes=1)
  213. d = mnp.tensordot(x, y, axes=2)
  214. e = mnp.tensordot(x, y, axes=(3, 0))
  215. f = mnp.tensordot(x, y, axes=[2, 1])
  216. g = mnp.tensordot(x, y, axes=((2, 3), (0, 1)))
  217. h = mnp.tensordot(x, y, axes=[[3, 2], [1, 0]])
  218. return a, b, c, d, e, f, g, h
  219. def onp_tensordot(x, y):
  220. a = onp.tensordot(x, y)
  221. b = onp.tensordot(x, y, axes=0)
  222. c = onp.tensordot(x, y, axes=1)
  223. d = onp.tensordot(x, y, axes=2)
  224. e = onp.tensordot(x, y, axes=(3, 0))
  225. f = onp.tensordot(x, y, axes=[2, 1])
  226. g = onp.tensordot(x, y, axes=((2, 3), (0, 1)))
  227. h = onp.tensordot(x, y, axes=[[3, 2], [1, 0]])
  228. return a, b, c, d, e, f, g, h
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensordot():
    """Check mnp.tensordot against onp.tensordot for all axes specs."""
    # last two dims of x match first two dims of y (both 7x7)
    x = rand_int(4, 2, 7, 7)
    y = rand_int(7, 7, 6)
    run_multi_test(mnp_tensordot, onp_tensordot, (x, y))
  239. def mnp_std(x):
  240. a = mnp.std(x)
  241. b = mnp.std(x, axis=None)
  242. c = mnp.std(x, axis=0)
  243. d = mnp.std(x, axis=1)
  244. e = mnp.std(x, axis=(-1, 1))
  245. f = mnp.std(x, axis=(0, 1, 2))
  246. g = mnp.std(x, axis=None, ddof=1, keepdims=True)
  247. h = mnp.std(x, axis=0, ddof=1, keepdims=True)
  248. i = mnp.std(x, axis=(2), ddof=1, keepdims=True)
  249. return a, b, c, d, e, f, g, h, i
  250. def onp_std(x):
  251. a = onp.std(x)
  252. b = onp.std(x, axis=None)
  253. c = onp.std(x, axis=0)
  254. d = onp.std(x, axis=1)
  255. e = onp.std(x, axis=(-1, 1))
  256. f = onp.std(x, axis=(0, 1, 2))
  257. g = onp.std(x, axis=None, ddof=1, keepdims=True)
  258. h = onp.std(x, axis=0, ddof=1, keepdims=True)
  259. i = onp.std(x, axis=(2), ddof=1, keepdims=True)
  260. return a, b, c, d, e, f, g, h, i
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_std():
    """Check mnp.std against onp.std on 4-D and 5-D arrays (tol 1e-5)."""
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_std, onp_std, arr1, error=1e-5)
    run_single_test(mnp_std, onp_std, arr2, error=1e-5)
  272. def mnp_var(x):
  273. a = mnp.std(x)
  274. b = mnp.std(x, axis=0)
  275. c = mnp.std(x, axis=(0))
  276. d = mnp.std(x, axis=(0, 1, 2))
  277. e = mnp.std(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
  278. return a, b, c, d, e
  279. def onp_var(x):
  280. a = onp.std(x)
  281. b = onp.std(x, axis=0)
  282. c = onp.std(x, axis=(0))
  283. d = onp.std(x, axis=(0, 1, 2))
  284. e = onp.std(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
  285. return a, b, c, d, e
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_var():
    """Check the mnp_var/onp_var wrappers on 4-D and 5-D arrays (tol 1e-5)."""
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_var, onp_var, arr1, error=1e-5)
    run_single_test(mnp_var, onp_var, arr2, error=1e-5)
  297. def mnp_average(x):
  298. a = mnp.average(x)
  299. b = mnp.average(x, axis=None)
  300. c = mnp.average(x, axis=0)
  301. d = mnp.average(x, axis=1)
  302. e = mnp.average(x, axis=(-2, 1))
  303. f = mnp.average(x, axis=(0, 1, 2, 3))
  304. g = mnp.average(x, axis=None, weights=x)
  305. h = mnp.average(x, axis=0, weights=x)
  306. i = mnp.average(x, axis=(1, 2, 3), weights=x)
  307. return a, b, c, d, e, f, g, h, i
  308. def onp_average(x):
  309. a = onp.average(x)
  310. b = onp.average(x, axis=None)
  311. c = onp.average(x, axis=0)
  312. d = onp.average(x, axis=1)
  313. e = onp.average(x, axis=(-2, 1))
  314. f = onp.average(x, axis=(0, 1, 2, 3))
  315. g = onp.average(x, axis=None, weights=x)
  316. h = onp.average(x, axis=0, weights=x)
  317. i = onp.average(x, axis=(1, 2, 3), weights=x)
  318. return a, b, c, d, e, f, g, h, i
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_average():
    """Check mnp.average against onp.average on 4-D and 5-D arrays."""
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 1, 3, 1)
    run_single_test(mnp_average, onp_average, arr1, error=1e-5)
    run_single_test(mnp_average, onp_average, arr2, error=1e-5)
  330. def mnp_count_nonzero(x):
  331. a = mnp.count_nonzero(x)
  332. b = mnp.count_nonzero(x, axis=None)
  333. c = mnp.count_nonzero(x, axis=0)
  334. d = mnp.count_nonzero(x, axis=1)
  335. e = mnp.count_nonzero(x, axis=(-2, 1))
  336. f = mnp.count_nonzero(x, axis=(0, 1, 2, 3))
  337. return a, b, c, d, e, f
  338. def onp_count_nonzero(x):
  339. a = onp.count_nonzero(x)
  340. b = onp.count_nonzero(x, axis=None)
  341. c = onp.count_nonzero(x, axis=0)
  342. d = onp.count_nonzero(x, axis=1)
  343. e = onp.count_nonzero(x, axis=(-2, 1))
  344. f = onp.count_nonzero(x, axis=(0, 1, 2, 3))
  345. return a, b, c, d, e, f
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_count_nonzero():
    """Check mnp.count_nonzero against onp.count_nonzero."""
    # minus 5 to make some values below zero
    arr1 = rand_int(2, 3, 4, 5) - 5
    arr2 = rand_int(4, 5, 4, 3, 3) - 5
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr1)
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr2)
  358. def mnp_inner(a, b):
  359. return mnp.inner(a, b)
  360. def onp_inner(a, b):
  361. return onp.inner(a, b)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_inner():
    """Check mnp.inner against onp.inner on aligned arrays and scalars."""
    for arr1 in test_case.aligned_arrs:
        for arr2 in test_case.aligned_arrs:
            match_res(mnp_inner, onp_inner, arr1, arr2)

    for scalar1 in test_case.scalars:
        for scalar2 in test_case.scalars:
            match_res(mnp_inner, onp_inner,
                      scalar1, scalar2)
  376. def mnp_dot(a, b):
  377. return mnp.dot(a, b)
  378. def onp_dot(a, b):
  379. return onp.dot(a, b)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dot():
    """Check mnp.dot against onp.dot across dimensionality combinations."""
    # test case (1D, 1D)
    match_res(mnp_dot, onp_dot, rand_int(3), rand_int(3))

    # test case (2D, 2D)
    match_res(mnp_dot, onp_dot, rand_int(4, 7), rand_int(7, 2))

    # test case (0D, _) (_, 0D)
    match_res(mnp_dot, onp_dot, rand_int(), rand_int(1, 9, 3))
    match_res(mnp_dot, onp_dot, rand_int(8, 5, 6, 3), rand_int())

    # test case (ND, 1D)
    match_res(mnp_dot, onp_dot, rand_int(2, 4, 5), rand_int(5))

    # test case (ND, MD)
    match_res(mnp_dot, onp_dot, rand_int(5, 4, 1, 8), rand_int(8, 3))

    # core_broadcastables is laid out as consecutive compatible pairs
    for i in range(8):
        match_res(mnp_dot, onp_dot,
                  test_case.core_broadcastables[2*i], test_case.core_broadcastables[2*i + 1])
  401. def mnp_outer(a, b):
  402. return mnp.outer(a, b)
  403. def onp_outer(a, b):
  404. return onp.outer(a, b)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_outer():
    """Check mnp.outer against onp.outer on the shared fixtures."""
    run_binop_test(mnp_outer, onp_outer, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_type_promotion():
    """mnp.add on mixed dtypes should match the plain onp.add result."""
    arr = rand_int(2, 3)
    onp_sum = onp_add(arr, arr)

    a = mnp.asarray(arr, dtype='float16')
    b = mnp.asarray(arr, dtype='float32')
    c = mnp.asarray(arr, dtype='int32')

    match_array(mnp_add(a, b).asnumpy(), onp_sum)
    match_array(mnp_add(b, c).asnumpy(), onp_sum)
  427. def mnp_absolute(x):
  428. return mnp.absolute(x)
  429. def onp_absolute(x):
  430. return onp.absolute(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_absolute():
    """Check mnp.absolute over several dtypes, plus the out/where kwargs."""
    arr = rand_int(2, 3)

    a = mnp.asarray(arr, dtype='float16')
    b = mnp.asarray(arr, dtype='float32')
    c = mnp.asarray(arr, dtype='uint8')
    d = mnp.asarray(arr, dtype='bool')

    match_array(mnp_absolute(a).asnumpy(), onp_absolute(a.asnumpy()))
    match_array(mnp_absolute(b).asnumpy(), onp_absolute(b.asnumpy()))
    match_array(mnp_absolute(c).asnumpy(), onp_absolute(c.asnumpy()))
    match_array(mnp_absolute(d).asnumpy(), onp_absolute(d.asnumpy()))

    where = rand_int(2, 3).astype('bool')
    out = rand_int(2, 3)
    match_array(mnp.absolute(a, out=mnp.asarray(out), where=mnp.asarray(where)).asnumpy(),
                onp.absolute(a.asnumpy(), out=out, where=where))
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_deg2rad_rad2deg():
    """Check mnp.deg2rad/rad2deg against the onp equivalents."""
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp.deg2rad, onp.deg2rad, arr)
        match_res(mnp.rad2deg, onp.rad2deg, arr)
  462. def mnp_ptp(x):
  463. a = mnp.ptp(x)
  464. b = mnp.ptp(x, keepdims=True)
  465. c = mnp.ptp(x, axis=(0, 1))
  466. d = mnp.ptp(x, axis=-1)
  467. return a, b, c, d
  468. def onp_ptp(x):
  469. a = onp.ptp(x)
  470. b = onp.ptp(x, keepdims=True)
  471. c = onp.ptp(x, axis=(0, 1))
  472. d = onp.ptp(x, axis=-1)
  473. return a, b, c, d
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ptp():
    """Check mnp.ptp against onp.ptp on 2-D and 3-D arrays."""
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp_ptp, onp_ptp, arr)
  484. def mnp_add_dtype(x1, x2, out, where):
  485. a = mnp.add(x1, x2, dtype=mnp.float16)
  486. b = mnp.add(x1, x2, out=out, dtype=mnp.float16)
  487. c = mnp.add(x1, x2, where=where, dtype=mnp.float16)
  488. d = mnp.add(x1, x2, out=out, where=where, dtype=mnp.float16)
  489. return a, b, c, d
  490. def onp_add_dtype(x1, x2, out, where):
  491. a = onp.add(x1, x2, dtype=onp.float16)
  492. b = onp.add(x1, x2, out=out, dtype=onp.float16)
  493. c = onp.add(x1, x2, where=where, dtype=onp.float16)
  494. d = onp.add(x1, x2, out=out, where=where, dtype=onp.float16)
  495. return a, b, c, d
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_dtype():
    """Result dtypes of mnp.add with dtype/out/where must match numpy's."""
    x1 = rand_int(2, 3).astype('int32')
    x2 = rand_int(2, 3).astype('int32')
    out = rand_int(2, 3).astype('float32')
    where = rand_bool(2, 3)
    arrs = (x1, x2, out, where)
    mnp_arrs = map(mnp.array, arrs)
    mnp_res = mnp_add_dtype(*mnp_arrs)
    onp_res = onp_add_dtype(*arrs)
    for actual, expected in zip(mnp_res, onp_res):
        assert actual.asnumpy().dtype == expected.dtype
  513. def mnp_matmul(x1, x2):
  514. return mnp.matmul(x1, x2)
  515. def onp_matmul(x1, x2):
  516. return onp.matmul(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_matmul():
    """Check mnp.matmul against onp.matmul on scalars and matched pairs."""
    for scalar1 in test_case.scalars[1:]:
        for scalar2 in test_case.scalars[1:]:
            match_res(mnp_matmul, onp_matmul,
                      scalar1, scalar2)
    # core_broadcastables is laid out as consecutive compatible pairs
    for i in range(8):
        match_res(mnp_matmul, onp_matmul,
                  test_case.core_broadcastables[2*i],
                  test_case.core_broadcastables[2*i + 1])
  532. def mnp_square(x):
  533. return mnp.square(x)
  534. def onp_square(x):
  535. return onp.square(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_square():
    """Check mnp.square against onp.square on the shared fixtures."""
    run_unary_test(mnp_square, onp_square, test_case)
  544. def mnp_sqrt(x):
  545. return mnp.sqrt(x)
  546. def onp_sqrt(x):
  547. return onp.sqrt(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sqrt():
    """Check mnp.sqrt against onp.sqrt on the shared fixtures."""
    run_unary_test(mnp_sqrt, onp_sqrt, test_case)
  556. def mnp_reciprocal(x):
  557. return mnp.reciprocal(x)
  558. def onp_reciprocal(x):
  559. return onp.reciprocal(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_reciprocal():
    """Check mnp.reciprocal against onp.reciprocal on the shared fixtures."""
    run_unary_test(mnp_reciprocal, onp_reciprocal, test_case)
  568. def mnp_log(x):
  569. return mnp.log(x)
  570. def onp_log(x):
  571. return onp.log(x)
  572. @pytest.mark.level1
  573. @pytest.mark.platform_arm_ascend_training
  574. @pytest.mark.platform_x86_ascend_training
  575. @pytest.mark.platform_x86_gpu_training
  576. @pytest.mark.platform_x86_cpu
  577. @pytest.mark.env_onecard
  578. def test_log():
  579. run_unary_test(mnp.log, onp.log, test_case, error=1e-5)
  580. def mnp_maximum(x1, x2):
  581. return mnp.maximum(x1, x2)
  582. def onp_maximum(x1, x2):
  583. return onp.maximum(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maximum():
    """Check mnp.maximum against onp.maximum on the shared fixtures."""
    run_binop_test(mnp_maximum, onp_maximum, test_case)
  592. def mnp_clip(x):
  593. a = mnp.clip(x, mnp.asarray(10.0), mnp.asarray([2,]))
  594. b = mnp.clip(x, 0, 1)
  595. c = mnp.clip(x, mnp.asarray(0), mnp.asarray(10), dtype=mnp.float32)
  596. return a, b, c
  597. def onp_clip(x):
  598. a = onp.clip(x, onp.asarray(10.0), onp.asarray([2,]))
  599. b = onp.clip(x, 0, 1)
  600. c = onp.clip(x, onp.asarray(0), onp.asarray(10), dtype=onp.float32)
  601. return a, b, c
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_clip():
    """Check mnp.clip against onp.clip on the shared fixtures."""
    run_unary_test(mnp_clip, onp_clip, test_case)
  610. def mnp_amax(x, mask):
  611. a = mnp.amax(x)
  612. b = mnp.amax(x, axis=-3)
  613. c = mnp.amax(x, keepdims=True)
  614. d = mnp.amax(x, initial=3)
  615. e = mnp.amax(x, axis=(0, 1), keepdims=True)
  616. f = mnp.amax(x, initial=4, where=mask)
  617. g = mnp.amax(x, initial=5, where=mask, keepdims=True)
  618. h = mnp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
  619. return a, b, c, d, e, f, g, h
  620. def onp_amax(x, mask):
  621. a = onp.amax(x)
  622. b = onp.amax(x, axis=-3)
  623. c = onp.amax(x, keepdims=True)
  624. d = onp.amax(x, initial=3)
  625. e = onp.amax(x, axis=(0, 1), keepdims=True)
  626. f = onp.amax(x, initial=4, where=mask)
  627. g = onp.amax(x, initial=5, where=mask, keepdims=True)
  628. h = onp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
  629. return a, b, c, d, e, f, g, h
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amax():
    """Check mnp.amax against onp.amax with a boolean where mask."""
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amax, onp_amax, (a, mask))
  640. def mnp_amin(x, mask):
  641. a = mnp.amin(x)
  642. b = mnp.amin(x, axis=-3)
  643. c = mnp.amin(x, keepdims=True)
  644. d = mnp.amin(x, initial=-1)
  645. e = mnp.amin(x, axis=(0, 1), keepdims=True)
  646. f = mnp.amin(x, initial=-2, where=mask)
  647. g = mnp.amin(x, initial=-3, where=mask, keepdims=True)
  648. h = mnp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
  649. return a, b, c, d, e, f, g, h
  650. def onp_amin(x, mask):
  651. a = onp.amin(x)
  652. b = onp.amin(x, axis=-3)
  653. c = onp.amin(x, keepdims=True)
  654. d = onp.amin(x, initial=-1)
  655. e = onp.amin(x, axis=(0, 1), keepdims=True)
  656. f = onp.amin(x, initial=-2, where=mask)
  657. g = onp.amin(x, initial=-3, where=mask, keepdims=True)
  658. h = onp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
  659. return a, b, c, d, e, f, g, h
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amin():
    """Check mnp.amin against onp.amin with a boolean where mask."""
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amin, onp_amin, (a, mask))
  670. def mnp_hypot(x1, x2):
  671. return mnp.hypot(x1, x2)
  672. def onp_hypot(x1, x2):
  673. return onp.hypot(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_hypot():
    """Check mnp.hypot against onp.hypot on the shared fixtures."""
    run_binop_test(mnp_hypot, onp_hypot, test_case)
  682. def mnp_heaviside(x1, x2):
  683. return mnp.heaviside(x1, x2)
  684. def onp_heaviside(x1, x2):
  685. return onp.heaviside(x1, x2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_heaviside():
    """Check mnp.heaviside, including inputs that hit the x == 0 branch."""
    broadcastables = test_case.broadcastables
    for b1 in broadcastables:
        for b2 in broadcastables:
            # differences guarantee negative, zero and positive entries
            b = onp.subtract(b1, b2)
            match_res(mnp_heaviside, onp_heaviside, b, b1)
            match_res(mnp_heaviside, onp_heaviside, b, b2)
  699. def mnp_floor(x):
  700. return mnp.floor(x)
  701. def onp_floor(x):
  702. return onp.floor(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor():
    """Check mnp.floor on fixtures and on fractional positive/negative values."""
    run_unary_test(mnp_floor, onp_floor, test_case)
    x = rand_int(2, 3) * onp.random.rand(2, 3)
    match_res(mnp_floor, onp_floor, x)
    match_res(mnp_floor, onp_floor, -x)
  714. def mnp_floor_divide(x, y):
  715. return mnp.floor_divide(x, y)
  716. def onp_floor_divde(x, y):
  717. return onp.floor_divide(x, y)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor_divide():
    """Check mnp.floor_divide against onp.floor_divide on the fixtures."""
    run_binop_test(mnp_floor_divide, onp_floor_divde, test_case)
  726. def mnp_remainder(x, y):
  727. return mnp.remainder(x, y)
  728. def onp_remainder(x, y):
  729. return onp.remainder(x, y)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_remainder():
    """Check mnp.remainder against onp.remainder on random 2-D arrays."""
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_remainder, onp_remainder, x, y)
  740. def mnp_mod(x, y):
  741. return mnp.mod(x, y)
  742. def onp_mod(x, y):
  743. return onp.mod(x, y)
  744. @pytest.mark.level1
  745. @pytest.mark.platform_arm_ascend_training
  746. @pytest.mark.platform_x86_ascend_training
  747. @pytest.mark.platform_x86_gpu_training
  748. @pytest.mark.platform_x86_cpu
  749. @pytest.mark.env_onecard
  750. def test_mod():
  751. x = rand_int(2, 3)
  752. y = rand_int(2, 3)
  753. match_res(mnp_mod, onp_mod, x, y)
  754. def mnp_fmod(x, y):
  755. return mnp.fmod(x, y)
  756. def onp_fmod(x, y):
  757. return onp.fmod(x, y)
  758. @pytest.mark.level1
  759. @pytest.mark.platform_x86_gpu_training
  760. @pytest.mark.platform_x86_cpu
  761. @pytest.mark.env_onecard
  762. def test_fmod():
  763. x = rand_int(2, 3)
  764. y = rand_int(2, 3)
  765. match_res(mnp_fmod, onp_fmod, x, y)
  766. def mnp_fix(x):
  767. return mnp.fix(x)
  768. def onp_fix(x):
  769. return onp.fix(x)
  770. @pytest.mark.level1
  771. @pytest.mark.platform_arm_ascend_training
  772. @pytest.mark.platform_x86_ascend_training
  773. @pytest.mark.platform_x86_gpu_training
  774. @pytest.mark.platform_x86_cpu
  775. @pytest.mark.env_onecard
  776. def test_fix():
  777. x = rand_int(2, 3)
  778. y = rand_int(2, 3)
  779. floats = onp.divide(onp.subtract(x, y), y)
  780. match_res(mnp_fix, onp_fix, floats, error=1e-5)
  781. def mnp_trunc(x):
  782. return mnp.trunc(x)
  783. def onp_trunc(x):
  784. return onp.trunc(x)
  785. @pytest.mark.level1
  786. @pytest.mark.platform_arm_ascend_training
  787. @pytest.mark.platform_x86_ascend_training
  788. @pytest.mark.platform_x86_gpu_training
  789. @pytest.mark.platform_x86_cpu
  790. @pytest.mark.env_onecard
  791. def test_trunc():
  792. x = rand_int(2, 3)
  793. y = rand_int(2, 3)
  794. floats = onp.divide(onp.subtract(x, y), y)
  795. match_res(mnp_trunc, onp_trunc, floats, error=1e-5)
  796. def mnp_exp(x):
  797. return mnp.exp(x)
  798. def onp_exp(x):
  799. return onp.exp(x)
  800. @pytest.mark.level1
  801. @pytest.mark.platform_arm_ascend_training
  802. @pytest.mark.platform_x86_ascend_training
  803. @pytest.mark.platform_x86_gpu_training
  804. @pytest.mark.platform_x86_cpu
  805. @pytest.mark.env_onecard
  806. def test_exp():
  807. run_unary_test(mnp_exp, onp_exp, test_case, error=5)
  808. def mnp_expm1(x):
  809. return mnp.expm1(x)
  810. def onp_expm1(x):
  811. return onp.expm1(x)
  812. @pytest.mark.level1
  813. @pytest.mark.platform_arm_ascend_training
  814. @pytest.mark.platform_x86_ascend_training
  815. @pytest.mark.platform_x86_gpu_training
  816. @pytest.mark.platform_x86_cpu
  817. @pytest.mark.env_onecard
  818. def test_expm1():
  819. run_unary_test(mnp_expm1, onp_expm1, test_case, error=5)
  820. def mnp_positive(x, out, where):
  821. return mnp.positive(x, out=out, where=where)
  822. def onp_positive(x, out, where):
  823. return onp.positive(x, out=out, where=where)
  824. @pytest.mark.level1
  825. @pytest.mark.platform_arm_ascend_training
  826. @pytest.mark.platform_x86_ascend_training
  827. @pytest.mark.platform_x86_gpu_training
  828. @pytest.mark.platform_x86_cpu
  829. @pytest.mark.env_onecard
  830. def test_positive():
  831. arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
  832. out_lst = [onp.ones((2, 2, 3)).astype('float32'), onp.ones((5, 2, 2, 3)).astype('float32')]
  833. where_lst = [onp.full((2, 2, 3), [True, False, True]), onp.full((2, 3), False)]
  834. for out in out_lst:
  835. for where in where_lst:
  836. onp_pos = onp_positive(arr, out=out, where=where)
  837. mnp_pos = mnp_positive(mnp.asarray(arr), mnp.asarray(out), mnp.asarray(where))
  838. match_array(mnp_pos.asnumpy(), onp_pos)
  839. def mnp_negative(x, out, where):
  840. return mnp.negative(x, out=out, where=where)
  841. def onp_negative(x, out, where):
  842. return onp.negative(x, out=out, where=where)
  843. @pytest.mark.level1
  844. @pytest.mark.platform_arm_ascend_training
  845. @pytest.mark.platform_x86_ascend_training
  846. @pytest.mark.platform_x86_gpu_training
  847. @pytest.mark.platform_x86_cpu
  848. @pytest.mark.env_onecard
  849. def test_negative():
  850. arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
  851. out_lst = [onp.ones((2, 2, 3)).astype('float32'), onp.ones((5, 2, 2, 3)).astype('float32')]
  852. where_lst = [onp.full((2, 2, 3), [True, False, True]), onp.full((2, 3), False)]
  853. for out in out_lst:
  854. for where in where_lst:
  855. onp_neg = onp_negative(arr, out=out, where=where)
  856. mnp_neg = mnp_negative(mnp.asarray(arr), mnp.asarray(out), mnp.asarray(where))
  857. match_array(mnp_neg.asnumpy(), onp_neg, 1e-5)
  858. @pytest.mark.level1
  859. @pytest.mark.platform_arm_ascend_training
  860. @pytest.mark.platform_x86_ascend_training
  861. @pytest.mark.platform_x86_gpu_training
  862. @pytest.mark.platform_x86_cpu
  863. @pytest.mark.env_onecard
  864. def test_cumsum():
  865. x = mnp.ones((16, 16), dtype="bool")
  866. match_array(mnp.cumsum(x).asnumpy(), onp.cumsum(x.asnumpy()))
  867. match_array(mnp.cumsum(x, axis=0).asnumpy(),
  868. onp.cumsum(x.asnumpy(), axis=0))
  869. match_meta(mnp.cumsum(x).asnumpy(), onp.cumsum(x.asnumpy()))
  870. x = rand_int(3, 4, 5)
  871. match_array(mnp.cumsum(mnp.asarray(x), dtype="bool").asnumpy(),
  872. onp.cumsum(x, dtype="bool"))
  873. match_array(mnp.cumsum(mnp.asarray(x), axis=-1).asnumpy(),
  874. onp.cumsum(x, axis=-1))
  875. @pytest.mark.level1
  876. @pytest.mark.platform_arm_ascend_training
  877. @pytest.mark.platform_x86_ascend_training
  878. @pytest.mark.platform_x86_gpu_training
  879. @pytest.mark.platform_x86_cpu
  880. @pytest.mark.env_onecard
  881. def test_exception_innner():
  882. with pytest.raises(ValueError):
  883. mnp.inner(mnp.asarray(test_case.arrs[0]),
  884. mnp.asarray(test_case.arrs[1]))
  885. @pytest.mark.level1
  886. @pytest.mark.platform_arm_ascend_training
  887. @pytest.mark.platform_x86_ascend_training
  888. @pytest.mark.platform_x86_gpu_training
  889. @pytest.mark.platform_x86_cpu
  890. @pytest.mark.env_onecard
  891. def test_exception_add():
  892. with pytest.raises(ValueError):
  893. mnp.add(mnp.asarray(test_case.arrs[1]), mnp.asarray(test_case.arrs[2]))
  894. @pytest.mark.level1
  895. @pytest.mark.platform_arm_ascend_training
  896. @pytest.mark.platform_x86_ascend_training
  897. @pytest.mark.platform_x86_gpu_training
  898. @pytest.mark.platform_x86_cpu
  899. @pytest.mark.env_onecard
  900. def test_exception_mean():
  901. with pytest.raises(ValueError):
  902. mnp.mean(mnp.asarray(test_case.arrs[0]), (-1, 0))