
test_math_ops.py

# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""unit tests for numpy math operations"""

import pytest
import numpy as onp

import mindspore.numpy as mnp

from .utils import rand_int, rand_bool, run_binop_test, run_unary_test, run_multi_test, \
    run_single_test, match_res, match_array


class Cases():
    def __init__(self):
        self.arrs = [
            rand_int(2),
            rand_int(2, 3),
            rand_int(2, 3, 4),
            rand_int(2, 3, 4, 5),
        ]

        # scalars expanded across the 0th dimension
        self.scalars = [
            rand_int(),
            rand_int(1),
            rand_int(1, 1),
            rand_int(1, 1, 1, 1),
        ]

        # empty arrays
        self.empty_arrs = [
            rand_int(0),
            rand_int(4, 0),
            rand_int(2, 0, 2),
            rand_int(5, 0, 7, 0),
        ]

        # arrays of the same size expanded across the 0th dimension
        self.expanded_arrs = [
            rand_int(2, 3),
            rand_int(1, 2, 3),
            rand_int(1, 1, 2, 3),
            rand_int(1, 1, 1, 2, 3),
        ]

        # arrays with last dimension aligned
        self.aligned_arrs = [
            rand_int(2, 3),
            rand_int(1, 4, 3),
            rand_int(5, 1, 2, 3),
            rand_int(4, 2, 1, 1, 3),
        ]

        # arrays which can be broadcast
        self.broadcastables = [
            rand_int(5),
            rand_int(6, 1),
            rand_int(7, 1, 5),
            rand_int(8, 1, 6, 1),
        ]

        # boolean arrays which can be broadcast
        self.bool_broadcastables = [
            rand_bool(),
            rand_bool(1),
            rand_bool(5),
            rand_bool(6, 1),
            rand_bool(7, 1, 5),
            rand_bool(8, 1, 6, 1),
        ]

        # core dimension 0 is matched for each
        # pair of array[i] and array[i + 1]
        self.core_broadcastables = [
            rand_int(3),
            rand_int(3),
            rand_int(6),
            rand_int(6, 4),
            rand_int(5, 2),
            rand_int(2),
            rand_int(2, 9),
            rand_int(9, 8),
            rand_int(6),
            rand_int(2, 6, 5),
            rand_int(9, 2, 7),
            rand_int(7),
            rand_int(5, 2, 4),
            rand_int(6, 1, 4, 9),
            rand_int(7, 1, 5, 3, 2),
            rand_int(8, 1, 6, 1, 2, 9),
        ]

        # arrays with dimensions of size 1
        self.nested_arrs = [
            rand_int(1),
            rand_int(1, 2),
            rand_int(3, 1, 8),
            rand_int(1, 3, 9, 1),
        ]


test_case = Cases()
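
# Each operation below is wrapped twice: the mnp_* wrapper calls the
# mindspore.numpy implementation and the onp_* wrapper calls the reference
# NumPy one. The run_binop_test / run_unary_test / run_multi_test /
# run_single_test helpers from .utils evaluate both wrappers over the arrays
# in test_case and check that their results agree.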
def mnp_add(x1, x2):
    return mnp.add(x1, x2)

def onp_add(x1, x2):
    return onp.add(x1, x2)

def mnp_subtract(x1, x2):
    return mnp.subtract(x1, x2)

def onp_subtract(x1, x2):
    return onp.subtract(x1, x2)

def mnp_multiply(x1, x2):
    return mnp.multiply(x1, x2)

def onp_multiply(x1, x2):
    return onp.multiply(x1, x2)

def mnp_divide(x1, x2):
    return mnp.divide(x1, x2)

def onp_divide(x1, x2):
    return onp.divide(x1, x2)

def mnp_true_divide(x1, x2):
    return mnp.true_divide(x1, x2)

def onp_true_divide(x1, x2):
    return onp.true_divide(x1, x2)

def mnp_power(x1, x2):
    return mnp.power(x1, x2)

def onp_power(x1, x2):
    return onp.power(x1, x2)

def mnp_float_power(x1, x2):
    return mnp.float_power(x1, x2)

def onp_float_power(x1, x2):
    return onp.float_power(x1, x2)

def mnp_minimum(a, b):
    return mnp.minimum(a, b)

def onp_minimum(a, b):
    return onp.minimum(a, b)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add():
    run_binop_test(mnp_add, onp_add, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_subtract():
    run_binop_test(mnp_subtract, onp_subtract, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multiply():
    run_binop_test(mnp_multiply, onp_multiply, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_divide():
    run_binop_test(mnp_divide, onp_divide, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_true_divide():
    run_binop_test(mnp_true_divide, onp_true_divide, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_power():
    run_binop_test(mnp_power, onp_power, test_case, error=1e-5)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_float_power():
    run_binop_test(mnp_float_power, onp_float_power, test_case, error=1e-5)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_minimum():
    run_binop_test(mnp_minimum, onp_minimum, test_case)
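
# The test below also exercises the ufunc keyword arguments `where` and `out`:
# the output shape is obtained by broadcasting `where`, x and y together, and a
# pre-allocated `out` array of that shape is passed to both implementations.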
def mnp_add_kwargs(x, y, where=None, out=None):
    return mnp.add(x, y, where=where, out=out)

def onp_add_kwargs(x, y, where=None, out=None):
    return onp.add(x, y, where=where, out=out)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_kwargs():
    for where in test_case.bool_broadcastables[:2]:
        for x in test_case.broadcastables[:2]:
            for y in test_case.broadcastables[:2]:
                shape_out = onp.broadcast(where, x, y).shape
                out = rand_int(*shape_out)
                match_res(mnp_add_kwargs, onp_add_kwargs, x, y, where, out)

def mnp_tensordot(x, y):
    a = mnp.tensordot(x, y)
    b = mnp.tensordot(x, y, axes=0)
    c = mnp.tensordot(x, y, axes=1)
    d = mnp.tensordot(x, y, axes=2)
    e = mnp.tensordot(x, y, axes=(3, 0))
    f = mnp.tensordot(x, y, axes=[2, 1])
    g = mnp.tensordot(x, y, axes=((2, 3), (0, 1)))
    h = mnp.tensordot(x, y, axes=[[3, 2], [1, 0]])
    return a, b, c, d, e, f, g, h

def onp_tensordot(x, y):
    a = onp.tensordot(x, y)
    b = onp.tensordot(x, y, axes=0)
    c = onp.tensordot(x, y, axes=1)
    d = onp.tensordot(x, y, axes=2)
    e = onp.tensordot(x, y, axes=(3, 0))
    f = onp.tensordot(x, y, axes=[2, 1])
    g = onp.tensordot(x, y, axes=((2, 3), (0, 1)))
    h = onp.tensordot(x, y, axes=[[3, 2], [1, 0]])
    return a, b, c, d, e, f, g, h

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensordot():
    x = rand_int(4, 2, 7, 7)
    y = rand_int(7, 7, 6)
    run_multi_test(mnp_tensordot, onp_tensordot, (x, y))
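
# The reduction tests below sweep combinations of axis, keepdims, ddof and
# weights and compare every returned result against NumPy.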
def mnp_std(x):
    a = mnp.std(x)
    b = mnp.std(x, axis=None)
    c = mnp.std(x, axis=0)
    d = mnp.std(x, axis=1)
    e = mnp.std(x, axis=(-1, 1))
    f = mnp.std(x, axis=(0, 1, 2))
    g = mnp.std(x, axis=None, ddof=1, keepdims=True)
    h = mnp.std(x, axis=0, ddof=1, keepdims=True)
    i = mnp.std(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i

def onp_std(x):
    a = onp.std(x)
    b = onp.std(x, axis=None)
    c = onp.std(x, axis=0)
    d = onp.std(x, axis=1)
    e = onp.std(x, axis=(-1, 1))
    f = onp.std(x, axis=(0, 1, 2))
    g = onp.std(x, axis=None, ddof=1, keepdims=True)
    h = onp.std(x, axis=0, ddof=1, keepdims=True)
    i = onp.std(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_std():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_std, onp_std, arr1, error=1e-5)
    run_single_test(mnp_std, onp_std, arr2, error=1e-5)

def mnp_var(x):
    a = mnp.var(x)
    b = mnp.var(x, axis=0)
    c = mnp.var(x, axis=(0))
    d = mnp.var(x, axis=(0, 1, 2))
    e = mnp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e

def onp_var(x):
    a = onp.var(x)
    b = onp.var(x, axis=0)
    c = onp.var(x, axis=(0))
    d = onp.var(x, axis=(0, 1, 2))
    e = onp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_var():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_var, onp_var, arr1, error=1e-5)
    run_single_test(mnp_var, onp_var, arr2, error=1e-5)

def mnp_average(x):
    a = mnp.average(x)
    b = mnp.average(x, axis=None)
    c = mnp.average(x, axis=0)
    d = mnp.average(x, axis=1)
    e = mnp.average(x, axis=(-2, 1))
    f = mnp.average(x, axis=(0, 1, 2, 3))
    g = mnp.average(x, axis=None, weights=x)
    h = mnp.average(x, axis=0, weights=x)
    i = mnp.average(x, axis=(1, 2, 3), weights=x)
    return a, b, c, d, e, f, g, h, i

def onp_average(x):
    a = onp.average(x)
    b = onp.average(x, axis=None)
    c = onp.average(x, axis=0)
    d = onp.average(x, axis=1)
    e = onp.average(x, axis=(-2, 1))
    f = onp.average(x, axis=(0, 1, 2, 3))
    g = onp.average(x, axis=None, weights=x)
    h = onp.average(x, axis=0, weights=x)
    i = onp.average(x, axis=(1, 2, 3), weights=x)
    return a, b, c, d, e, f, g, h, i

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_average():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 1, 3, 1)
    run_single_test(mnp_average, onp_average, arr1, error=1e-5)
    run_single_test(mnp_average, onp_average, arr2, error=1e-5)

def mnp_count_nonzero(x):
    a = mnp.count_nonzero(x)
    b = mnp.count_nonzero(x, axis=None)
    c = mnp.count_nonzero(x, axis=0)
    d = mnp.count_nonzero(x, axis=1)
    e = mnp.count_nonzero(x, axis=(-2, 1))
    f = mnp.count_nonzero(x, axis=(0, 1, 2, 3))
    return a, b, c, d, e, f

def onp_count_nonzero(x):
    a = onp.count_nonzero(x)
    b = onp.count_nonzero(x, axis=None)
    c = onp.count_nonzero(x, axis=0)
    d = onp.count_nonzero(x, axis=1)
    e = onp.count_nonzero(x, axis=(-2, 1))
    f = onp.count_nonzero(x, axis=(0, 1, 2, 3))
    return a, b, c, d, e, f

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_count_nonzero():
    # minus 5 to make some values below zero
    arr1 = rand_int(2, 3, 4, 5) - 5
    arr2 = rand_int(4, 5, 4, 3, 3) - 5
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr1)
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr2)

def mnp_inner(a, b):
    return mnp.inner(a, b)

def onp_inner(a, b):
    return onp.inner(a, b)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_inner():
    for arr1 in test_case.aligned_arrs:
        for arr2 in test_case.aligned_arrs:
            match_res(mnp_inner, onp_inner, arr1, arr2)

    for scalar1 in test_case.scalars:
        for scalar2 in test_case.scalars:
            match_res(mnp_inner, onp_inner,
                      scalar1, scalar2)

def mnp_dot(a, b):
    return mnp.dot(a, b)

def onp_dot(a, b):
    return onp.dot(a, b)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dot():
    # test case (1D, 1D)
    match_res(mnp_dot, onp_dot, rand_int(3), rand_int(3))

    # test case (2D, 2D)
    match_res(mnp_dot, onp_dot, rand_int(4, 7), rand_int(7, 2))

    # test case (0D, _) (_, 0D)
    match_res(mnp_dot, onp_dot, rand_int(), rand_int(1, 9, 3))
    match_res(mnp_dot, onp_dot, rand_int(8, 5, 6, 3), rand_int())

    # test case (ND, 1D)
    match_res(mnp_dot, onp_dot, rand_int(2, 4, 5), rand_int(5))

    # test case (ND, MD)
    match_res(mnp_dot, onp_dot, rand_int(5, 4, 1, 8), rand_int(8, 3))
    for i in range(8):
        match_res(mnp_dot, onp_dot,
                  test_case.core_broadcastables[2*i], test_case.core_broadcastables[2*i + 1])

def mnp_outer(a, b):
    return mnp.outer(a, b)

def onp_outer(a, b):
    return onp.outer(a, b)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_outer():
    run_binop_test(mnp_outer, onp_outer, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_type_promotion():
    arr = rand_int(2, 3)
    onp_sum = onp_add(arr, arr)

    a = mnp.asarray(arr, dtype='float16')
    b = mnp.asarray(arr, dtype='float32')
    c = mnp.asarray(arr, dtype='int32')

    match_array(mnp_add(a, b).asnumpy(), onp_sum)
    match_array(mnp_add(b, c).asnumpy(), onp_sum)

def mnp_absolute(x):
    return mnp.absolute(x)

def onp_absolute(x):
    return onp.absolute(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_absolute():
    arr = rand_int(2, 3)

    a = mnp.asarray(arr, dtype='float16')
    b = mnp.asarray(arr, dtype='float32')
    c = mnp.asarray(arr, dtype='uint8')
    d = mnp.asarray(arr, dtype='bool')

    match_array(mnp_absolute(a).asnumpy(), onp_absolute(a.asnumpy()))
    match_array(mnp_absolute(b).asnumpy(), onp_absolute(b.asnumpy()))
    match_array(mnp_absolute(c).asnumpy(), onp_absolute(c.asnumpy()))
    match_array(mnp_absolute(d).asnumpy(), onp_absolute(d.asnumpy()))

    where = rand_int(2, 3).astype('bool')
    out = rand_int(2, 3)
    match_array(mnp.absolute(a, out=mnp.asarray(out), where=mnp.asarray(where)).asnumpy(),
                onp.absolute(a.asnumpy(), out=out, where=where))

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_deg2rad_rad2deg():
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp.deg2rad, onp.deg2rad, arr)
        match_res(mnp.rad2deg, onp.rad2deg, arr)

def mnp_ptp(x):
    a = mnp.ptp(x)
    b = mnp.ptp(x, keepdims=True)
    c = mnp.ptp(x, axis=(0, 1))
    d = mnp.ptp(x, axis=-1)
    return a, b, c, d

def onp_ptp(x):
    a = onp.ptp(x)
    b = onp.ptp(x, keepdims=True)
    c = onp.ptp(x, axis=(0, 1))
    d = onp.ptp(x, axis=-1)
    return a, b, c, d

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ptp():
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp_ptp, onp_ptp, arr)

def mnp_add_dtype(x1, x2, out, where):
    a = mnp.add(x1, x2, dtype=mnp.float16)
    b = mnp.add(x1, x2, out=out, dtype=mnp.float16)
    c = mnp.add(x1, x2, where=where, dtype=mnp.float16)
    d = mnp.add(x1, x2, out=out, where=where, dtype=mnp.float16)
    return a, b, c, d

def onp_add_dtype(x1, x2, out, where):
    a = onp.add(x1, x2, dtype=onp.float16)
    b = onp.add(x1, x2, out=out, dtype=onp.float16)
    c = onp.add(x1, x2, where=where, dtype=onp.float16)
    d = onp.add(x1, x2, out=out, where=where, dtype=onp.float16)
    return a, b, c, d

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_dtype():
    x1 = rand_int(2, 3).astype('int32')
    x2 = rand_int(2, 3).astype('int32')
    out = rand_int(2, 3).astype('float32')
    where = rand_bool(2, 3)
    arrs = (x1, x2, out, where)
    mnp_arrs = map(mnp.array, arrs)
    mnp_res = mnp_add_dtype(*mnp_arrs)
    onp_res = onp_add_dtype(*arrs)
    for actual, expected in zip(mnp_res, onp_res):
        assert actual.asnumpy().dtype == expected.dtype
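
# matmul (like dot above) consumes test_case.core_broadcastables as consecutive
# pairs (index 2*i, 2*i + 1); each pair is constructed so that the contracted
# dimensions line up, while any leading dimensions are left to broadcast.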
def mnp_matmul(x1, x2):
    return mnp.matmul(x1, x2)

def onp_matmul(x1, x2):
    return onp.matmul(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_matmul():
    for scalar1 in test_case.scalars[1:]:
        for scalar2 in test_case.scalars[1:]:
            match_res(mnp_matmul, onp_matmul,
                      scalar1, scalar2)
    for i in range(8):
        match_res(mnp_matmul, onp_matmul,
                  test_case.core_broadcastables[2*i],
                  test_case.core_broadcastables[2*i + 1])

def mnp_square(x):
    return mnp.square(x)

def onp_square(x):
    return onp.square(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_square():
    run_unary_test(mnp_square, onp_square, test_case)

def mnp_sqrt(x):
    return mnp.sqrt(x)

def onp_sqrt(x):
    return onp.sqrt(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sqrt():
    run_unary_test(mnp_sqrt, onp_sqrt, test_case)

def mnp_reciprocal(x):
    return mnp.reciprocal(x)

def onp_reciprocal(x):
    return onp.reciprocal(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_reciprocal():
    run_unary_test(mnp_reciprocal, onp_reciprocal, test_case)

def mnp_log(x):
    return mnp.log(x)

def onp_log(x):
    return onp.log(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log():
    run_unary_test(mnp.log, onp.log, test_case, error=1e-5)

def mnp_maximum(x1, x2):
    return mnp.maximum(x1, x2)

def onp_maximum(x1, x2):
    return onp.maximum(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maximum():
    run_binop_test(mnp_maximum, onp_maximum, test_case)

def mnp_clip(x):
    a = mnp.clip(x, mnp.asarray(10.0), mnp.asarray([2,]))
    b = mnp.clip(x, 0, 1)
    c = mnp.clip(x, mnp.asarray(0), mnp.asarray(10), dtype=mnp.float32)
    return a, b, c

def onp_clip(x):
    a = onp.clip(x, onp.asarray(10.0), onp.asarray([2,]))
    b = onp.clip(x, 0, 1)
    c = onp.clip(x, onp.asarray(0), onp.asarray(10), dtype=onp.float32)
    return a, b, c

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_clip():
    run_unary_test(mnp_clip, onp_clip, test_case)

def mnp_amax(x, mask):
    a = mnp.amax(x)
    b = mnp.amax(x, axis=-3)
    c = mnp.amax(x, keepdims=True)
    d = mnp.amax(x, initial=3)
    e = mnp.amax(x, axis=(0, 1), keepdims=True)
    f = mnp.amax(x, initial=4, where=mask)
    g = mnp.amax(x, initial=5, where=mask, keepdims=True)
    h = mnp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
    return a, b, c, d, e, f, g, h

def onp_amax(x, mask):
    a = onp.amax(x)
    b = onp.amax(x, axis=-3)
    c = onp.amax(x, keepdims=True)
    d = onp.amax(x, initial=3)
    e = onp.amax(x, axis=(0, 1), keepdims=True)
    f = onp.amax(x, initial=4, where=mask)
    g = onp.amax(x, initial=5, where=mask, keepdims=True)
    h = onp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
    return a, b, c, d, e, f, g, h

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amax():
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amax, onp_amax, (a, mask))

def mnp_amin(x, mask):
    a = mnp.amin(x)
    b = mnp.amin(x, axis=-3)
    c = mnp.amin(x, keepdims=True)
    d = mnp.amin(x, initial=-1)
    e = mnp.amin(x, axis=(0, 1), keepdims=True)
    f = mnp.amin(x, initial=-2, where=mask)
    g = mnp.amin(x, initial=-3, where=mask, keepdims=True)
    h = mnp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
    return a, b, c, d, e, f, g, h

def onp_amin(x, mask):
    a = onp.amin(x)
    b = onp.amin(x, axis=-3)
    c = onp.amin(x, keepdims=True)
    d = onp.amin(x, initial=-1)
    e = onp.amin(x, axis=(0, 1), keepdims=True)
    f = onp.amin(x, initial=-2, where=mask)
    g = onp.amin(x, initial=-3, where=mask, keepdims=True)
    h = onp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
    return a, b, c, d, e, f, g, h

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amin():
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amin, onp_amin, (a, mask))

def mnp_hypot(x1, x2):
    return mnp.hypot(x1, x2)

def onp_hypot(x1, x2):
    return onp.hypot(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_hypot():
    run_binop_test(mnp_hypot, onp_hypot, test_case)

def mnp_heaviside(x1, x2):
    return mnp.heaviside(x1, x2)

def onp_heaviside(x1, x2):
    return onp.heaviside(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_heaviside():
    broadcastables = test_case.broadcastables
    for b1 in broadcastables:
        for b2 in broadcastables:
            b = onp.subtract(b1, b2)
            match_res(mnp_heaviside, onp_heaviside, b, b1)
            match_res(mnp_heaviside, onp_heaviside, b, b2)

def mnp_floor(x):
    return mnp.floor(x)

def onp_floor(x):
    return onp.floor(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor():
    run_unary_test(mnp_floor, onp_floor, test_case)
    x = rand_int(2, 3) * onp.random.rand(2, 3)
    match_res(mnp_floor, onp_floor, x)
    match_res(mnp_floor, onp_floor, -x)

def mnp_floor_divide(x, y):
    return mnp.floor_divide(x, y)

def onp_floor_divide(x, y):
    return onp.floor_divide(x, y)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor_divide():
    run_binop_test(mnp_floor_divide, onp_floor_divide, test_case)

def mnp_remainder(x, y):
    return mnp.remainder(x, y)

def onp_remainder(x, y):
    return onp.remainder(x, y)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_remainder():
    run_binop_test(mnp_remainder, onp_remainder, test_case)

def mnp_mod(x, y):
    return mnp.mod(x, y)

def onp_mod(x, y):
    return onp.mod(x, y)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_mod():
    run_binop_test(mnp_mod, onp_mod, test_case)

def mnp_fmod(x, y):
    return mnp.fmod(x, y)

def onp_fmod(x, y):
    return onp.fmod(x, y)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fmod():
    run_binop_test(mnp_fmod, onp_fmod, test_case)

def mnp_fix(x):
    return mnp.fix(x)

def onp_fix(x):
    return onp.fix(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fix():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    floats = onp.divide(onp.subtract(x, y), y)
    match_res(mnp_fix, onp_fix, floats, error=1e-5)
    run_binop_test(mnp_fmod, onp_fmod, test_case, error=1e-5)

def mnp_trunc(x):
    return mnp.trunc(x)

def onp_trunc(x):
    return onp.trunc(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_trunc():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    floats = onp.divide(onp.subtract(x, y), y)
    match_res(mnp_trunc, onp_trunc, floats, error=1e-5)

def mnp_exp(x):
    return mnp.exp(x)

def onp_exp(x):
    return onp.exp(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exp():
    run_unary_test(mnp_exp, onp_exp, test_case, error=5)

def mnp_expm1(x):
    return mnp.expm1(x)

def onp_expm1(x):
    return onp.expm1(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_expm1():
    run_unary_test(mnp_expm1, onp_expm1, test_case, error=5)

def mnp_positive(x, out, where):
    return mnp.positive(x, out=out, where=where)

def onp_positive(x, out, where):
    return onp.positive(x, out=out, where=where)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_positive():
    arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
    out_lst = [onp.ones((2, 2, 3)).astype('float32'), onp.ones((5, 2, 2, 3)).astype('float32')]
    where_lst = [onp.full((2, 2, 3), [True, False, True]), onp.full((2, 3), False)]
    for out in out_lst:
        for where in where_lst:
            onp_pos = onp_positive(arr, out=out, where=where)
            mnp_pos = mnp_positive(mnp.asarray(arr), mnp.asarray(out), mnp.asarray(where))
            match_array(mnp_pos.asnumpy(), onp_pos)

def mnp_negative(x, out, where):
    return mnp.negative(x, out=out, where=where)

def onp_negative(x, out, where):
    return onp.negative(x, out=out, where=where)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_negative():
    arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
    out_lst = [onp.ones((2, 2, 3)).astype('float32'), onp.ones((5, 2, 2, 3)).astype('float32')]
    where_lst = [onp.full((2, 2, 3), [True, False, True]), onp.full((2, 3), False)]
    for out in out_lst:
        for where in where_lst:
            onp_neg = onp_negative(arr, out=out, where=where)
            mnp_neg = mnp_negative(mnp.asarray(arr), mnp.asarray(out), mnp.asarray(where))
            match_array(mnp_neg.asnumpy(), onp_neg, 1e-5)
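
# The remaining tests check error handling: shape-incompatible operands and an
# axis specification that is invalid for the input are expected to raise
# ValueError.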
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_inner():
    with pytest.raises(ValueError):
        mnp.inner(mnp.asarray(test_case.arrs[0]),
                  mnp.asarray(test_case.arrs[1]))

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_add():
    with pytest.raises(ValueError):
        mnp.add(mnp.asarray(test_case.arrs[1]), mnp.asarray(test_case.arrs[2]))

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_mean():
    with pytest.raises(ValueError):
        mnp.mean(mnp.asarray(test_case.arrs[0]), (-1, 0))