# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""unit tests for numpy math operations"""

import pytest
import numpy as onp

import mindspore.numpy as mnp

from .utils import rand_int, rand_bool, run_binop_test, run_unary_test, run_multi_test, \
    run_single_test, match_res, match_array, match_meta, match_all_arrays, to_tensor


class Cases():
    def __init__(self):
        self.arrs = [
            rand_int(2),
            rand_int(2, 3),
            rand_int(2, 3, 4),
        ]

        # scalars expanded across the 0th dimension
        self.scalars = [
            rand_int(),
            rand_int(1),
            rand_int(1, 1),
        ]

        # arrays of the same size expanded across the 0th dimension
        self.expanded_arrs = [
            rand_int(2, 3),
            rand_int(1, 2, 3),
            rand_int(1, 1, 2, 3),
        ]

        # arrays with last dimension aligned
        self.aligned_arrs = [
            rand_int(2, 3),
            rand_int(1, 4, 3),
            rand_int(5, 1, 2, 3),
            rand_int(4, 2, 1, 1, 3),
        ]

        # arrays which can be broadcast
        self.broadcastables = [
            rand_int(5),
            rand_int(6, 1),
            rand_int(7, 1, 5),
        ]

        # boolean arrays which can be broadcast
        self.bool_broadcastables = [
            rand_bool(),
            rand_bool(1),
            rand_bool(5),
            rand_bool(6, 1),
            rand_bool(7, 1, 5),
            rand_bool(8, 1, 6, 1),
        ]

        # core dimension 0 is matched for each
        # pair of array[i] and array[i + 1]
        self.core_broadcastables = [
            rand_int(3),
            rand_int(3),
            rand_int(6),
            rand_int(6, 4),
            rand_int(5, 2),
            rand_int(2),
            rand_int(2, 9),
            rand_int(9, 8),
            rand_int(6),
            rand_int(2, 6, 5),
            rand_int(9, 2, 7),
            rand_int(7),
            rand_int(5, 2, 4),
            rand_int(6, 1, 4, 9),
            rand_int(7, 1, 5, 3, 2),
            rand_int(8, 1, 6, 1, 2, 9),
        ]

        # arrays with dimensions of size 1
        self.nested_arrs = [
            rand_int(1),
            rand_int(1, 2),
            rand_int(3, 1, 8),
            rand_int(1, 3, 9, 1),
        ]


test_case = Cases()
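
# Each mnp_*/onp_* helper pair below calls mindspore.numpy and numpy with the
# same arguments, so the shared run_*/match_* utilities can compare the
# results elementwise.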


def mnp_add(x1, x2):
    return mnp.add(x1, x2)


def onp_add(x1, x2):
    return onp.add(x1, x2)


def mnp_subtract(x1, x2):
    return mnp.subtract(x1, x2)


def onp_subtract(x1, x2):
    return onp.subtract(x1, x2)


def mnp_multiply(x1, x2):
    return mnp.multiply(x1, x2)


def onp_multiply(x1, x2):
    return onp.multiply(x1, x2)


def mnp_divide(x1, x2):
    return mnp.divide(x1, x2)


def onp_divide(x1, x2):
    return onp.divide(x1, x2)


def mnp_true_divide(x1, x2):
    return mnp.true_divide(x1, x2)


def onp_true_divide(x1, x2):
    return onp.true_divide(x1, x2)


def mnp_power(x1, x2):
    return mnp.power(x1, x2)


def onp_power(x1, x2):
    return onp.power(x1, x2)


def mnp_float_power(x1, x2):
    return mnp.float_power(x1, x2)


def onp_float_power(x1, x2):
    return onp.float_power(x1, x2)


def mnp_minimum(a, b):
    return mnp.minimum(a, b)


def onp_minimum(a, b):
    return onp.minimum(a, b)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add():
    run_binop_test(mnp_add, onp_add, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_subtract():
    run_binop_test(mnp_subtract, onp_subtract, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multiply():
    run_binop_test(mnp_multiply, onp_multiply, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_divide():
    run_binop_test(mnp_divide, onp_divide, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_true_divide():
    run_binop_test(mnp_true_divide, onp_true_divide, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_power():
    run_binop_test(mnp_power, onp_power, test_case, error=1e-5)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_float_power():
    run_binop_test(mnp_float_power, onp_float_power, test_case, error=1e-5)


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_minimum():
    run_binop_test(mnp_minimum, onp_minimum, test_case)

    x = onp.random.randint(-10, 10, 20).astype(onp.float32)
    y = onp.random.randint(-10, 10, 20).astype(onp.float32)
    x[onp.random.randint(0, 10, 3)] = onp.nan
    y[onp.random.randint(0, 10, 3)] = onp.nan
    x[onp.random.randint(0, 10, 3)] = onp.NINF
    y[onp.random.randint(0, 10, 3)] = onp.NINF
    x[onp.random.randint(0, 10, 3)] = onp.PINF
    y[onp.random.randint(0, 10, 3)] = onp.PINF
    match_res(mnp_minimum, onp_minimum, x, y)
    match_res(mnp_minimum, onp_minimum, y, x)
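
# tensordot is exercised with the default contraction as well as integer,
# tuple and nested-list forms of the axes argument.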


def mnp_tensordot(x, y):
    a = mnp.tensordot(x, y)
    b = mnp.tensordot(x, y, axes=0)
    c = mnp.tensordot(x, y, axes=1)
    d = mnp.tensordot(x, y, axes=2)
    e = mnp.tensordot(x, y, axes=(3, 0))
    f = mnp.tensordot(x, y, axes=[2, 1])
    g = mnp.tensordot(x, y, axes=((2, 3), (0, 1)))
    h = mnp.tensordot(x, y, axes=[[3, 2], [1, 0]])
    return a, b, c, d, e, f, g, h


def onp_tensordot(x, y):
    a = onp.tensordot(x, y)
    b = onp.tensordot(x, y, axes=0)
    c = onp.tensordot(x, y, axes=1)
    d = onp.tensordot(x, y, axes=2)
    e = onp.tensordot(x, y, axes=(3, 0))
    f = onp.tensordot(x, y, axes=[2, 1])
    g = onp.tensordot(x, y, axes=((2, 3), (0, 1)))
    h = onp.tensordot(x, y, axes=[[3, 2], [1, 0]])
    return a, b, c, d, e, f, g, h


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensordot():
    x = rand_int(4, 2, 7, 7)
    y = rand_int(7, 7, 6)
    run_multi_test(mnp_tensordot, onp_tensordot, (x, y))


def mnp_std(x):
    a = mnp.std(x)
    b = mnp.std(x, axis=None)
    c = mnp.std(x, axis=0)
    d = mnp.std(x, axis=1)
    e = mnp.std(x, axis=(-1, 1))
    f = mnp.std(x, axis=(0, 1, 2))
    g = mnp.std(x, axis=None, ddof=1, keepdims=True)
    h = mnp.std(x, axis=0, ddof=1, keepdims=True)
    i = mnp.std(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i


def onp_std(x):
    a = onp.std(x)
    b = onp.std(x, axis=None)
    c = onp.std(x, axis=0)
    d = onp.std(x, axis=1)
    e = onp.std(x, axis=(-1, 1))
    f = onp.std(x, axis=(0, 1, 2))
    g = onp.std(x, axis=None, ddof=1, keepdims=True)
    h = onp.std(x, axis=0, ddof=1, keepdims=True)
    i = onp.std(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_std():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_std, onp_std, arr1, error=1e-5)
    run_single_test(mnp_std, onp_std, arr2, error=1e-5)


def mnp_nanstd(x):
    a = mnp.nanstd(x)
    b = mnp.nanstd(x, axis=None)
    c = mnp.nanstd(x, axis=0)
    d = mnp.nanstd(x, axis=1)
    e = mnp.nanstd(x, axis=(-1, 1))
    f = mnp.nanstd(x, axis=(0, 1, 2))
    g = mnp.nanstd(x, axis=None, ddof=1, keepdims=True)
    h = mnp.nanstd(x, axis=0, ddof=1, keepdims=True)
    i = mnp.nanstd(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i


def onp_nanstd(x):
    a = onp.nanstd(x)
    b = onp.nanstd(x, axis=None)
    c = onp.nanstd(x, axis=0)
    d = onp.nanstd(x, axis=1)
    e = onp.nanstd(x, axis=(-1, 1))
    f = onp.nanstd(x, axis=(0, 1, 2))
    g = onp.nanstd(x, axis=None, ddof=1, keepdims=True)
    h = onp.nanstd(x, axis=0, ddof=1, keepdims=True)
    i = onp.nanstd(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanstd():
    arr1 = rand_int(2, 3, 4, 5)
    arr1[0][2][1][3] = onp.nan
    arr1[1][0][2][4] = onp.nan
    arr1[1][1][1][1] = onp.nan
    arr2 = rand_int(4, 5, 4, 3, 3)
    arr2[3][1][2][1][0] = onp.nan
    arr2[1][1][1][1][1] = onp.nan
    arr2[0][4][3][0][2] = onp.nan
    run_single_test(mnp_nanstd, onp_nanstd, arr1, error=1e-5)
    run_single_test(mnp_nanstd, onp_nanstd, arr2, error=1e-5)
    match_res(mnp.nanstd, onp.nanstd, rand_int())


def mnp_var(x):
    a = mnp.var(x)
    b = mnp.var(x, axis=0)
    c = mnp.var(x, axis=(0))
    d = mnp.var(x, axis=(0, 1, 2))
    e = mnp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e


def onp_var(x):
    a = onp.var(x)
    b = onp.var(x, axis=0)
    c = onp.var(x, axis=(0))
    d = onp.var(x, axis=(0, 1, 2))
    e = onp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_var():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_var, onp_var, arr1, error=1e-5)
    run_single_test(mnp_var, onp_var, arr2, error=1e-5)


def mnp_nanvar(x):
    a = mnp.nanvar(x)
    b = mnp.nanvar(x, axis=0)
    c = mnp.nanvar(x, axis=(0))
    d = mnp.nanvar(x, axis=(0, 1, 2))
    e = mnp.nanvar(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e


def onp_nanvar(x):
    a = onp.nanvar(x)
    b = onp.nanvar(x, axis=0)
    c = onp.nanvar(x, axis=(0))
    d = onp.nanvar(x, axis=(0, 1, 2))
    e = onp.nanvar(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanvar():
    arr1 = rand_int(2, 3, 4, 5)
    arr1[0][2][1][3] = onp.nan
    arr1[1][0][2][4] = onp.nan
    arr1[1][1][1][1] = onp.nan
    arr2 = rand_int(4, 5, 4, 3, 3)
    arr2[3][1][2][1][0] = onp.nan
    arr2[1][1][1][1][1] = onp.nan
    arr2[0][4][3][0][2] = onp.nan
    run_single_test(mnp_nanvar, onp_nanvar, arr1, error=1e-5)
    run_single_test(mnp_nanvar, onp_nanvar, arr2, error=1e-5)
    match_res(mnp.nanvar, onp.nanvar, rand_int())


def mnp_average(x):
    a = mnp.average(x)
    b = mnp.average(x, axis=None)
    c = mnp.average(x, axis=0)
    d = mnp.average(x, axis=1)
    e = mnp.average(x, axis=(-2, 1))
    f = mnp.average(x, axis=(0, 1, 2, 3))
    g = mnp.average(x, axis=None, weights=x)
    h = mnp.average(x, axis=0, weights=x)
    i = mnp.average(x, axis=(1, 2, 3), weights=x)
    return a, b, c, d, e, f, g, h, i


def onp_average(x):
    a = onp.average(x)
    b = onp.average(x, axis=None)
    c = onp.average(x, axis=0)
    d = onp.average(x, axis=1)
    e = onp.average(x, axis=(-2, 1))
    f = onp.average(x, axis=(0, 1, 2, 3))
    g = onp.average(x, axis=None, weights=x)
    h = onp.average(x, axis=0, weights=x)
    i = onp.average(x, axis=(1, 2, 3), weights=x)
    return a, b, c, d, e, f, g, h, i


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_average():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 1, 3, 1)
    run_single_test(mnp_average, onp_average, arr1, error=1e-5)
    run_single_test(mnp_average, onp_average, arr2, error=1e-5)


def mnp_count_nonzero(x):
    a = mnp.count_nonzero(x)
    b = mnp.count_nonzero(x, axis=None)
    c = mnp.count_nonzero(x, axis=0)
    d = mnp.count_nonzero(x, axis=1)
    e = mnp.count_nonzero(x, axis=(-2, 1))
    f = mnp.count_nonzero(x, axis=(0, 1, 2, 3))
    return a, b, c, d, e, f


def onp_count_nonzero(x):
    a = onp.count_nonzero(x)
    b = onp.count_nonzero(x, axis=None)
    c = onp.count_nonzero(x, axis=0)
    d = onp.count_nonzero(x, axis=1)
    e = onp.count_nonzero(x, axis=(-2, 1))
    f = onp.count_nonzero(x, axis=(0, 1, 2, 3))
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_count_nonzero():
    # minus 5 to make some values below zero
    arr1 = rand_int(2, 3, 4, 5) - 5
    arr2 = rand_int(4, 5, 4, 3, 3) - 5
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr1)
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr2)


def mnp_inner(a, b):
    return mnp.inner(a, b)


def onp_inner(a, b):
    return onp.inner(a, b)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_inner():
    for arr1 in test_case.aligned_arrs:
        for arr2 in test_case.aligned_arrs:
            match_res(mnp_inner, onp_inner, arr1, arr2)

    for scalar1 in test_case.scalars:
        for scalar2 in test_case.scalars:
            match_res(mnp_inner, onp_inner,
                      scalar1, scalar2)


def mnp_dot(a, b):
    return mnp.dot(a, b)


def onp_dot(a, b):
    return onp.dot(a, b)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dot():
    # test case (1D, 1D)
    match_res(mnp_dot, onp_dot, rand_int(3), rand_int(3))

    # test case (2D, 2D)
    match_res(mnp_dot, onp_dot, rand_int(4, 7), rand_int(7, 2))

    # test case (0D, _) (_, 0D)
    match_res(mnp_dot, onp_dot, rand_int(), rand_int(1, 9, 3))
    match_res(mnp_dot, onp_dot, rand_int(8, 5, 6, 3), rand_int())

    # test case (ND, 1D)
    match_res(mnp_dot, onp_dot, rand_int(2, 4, 5), rand_int(5))

    # test case (ND, MD)
    match_res(mnp_dot, onp_dot, rand_int(5, 4, 1, 8), rand_int(8, 3))

    for i in range(8):
        match_res(mnp_dot, onp_dot,
                  test_case.core_broadcastables[2*i], test_case.core_broadcastables[2*i + 1])


def mnp_outer(a, b):
    return mnp.outer(a, b)


def onp_outer(a, b):
    return onp.outer(a, b)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_outer():
    run_binop_test(mnp_outer, onp_outer, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_type_promotion():
    arr = rand_int(2, 3)
    onp_sum = onp_add(arr, arr)

    a = to_tensor(arr, dtype=mnp.float16)
    b = to_tensor(arr, dtype=mnp.float32)
    c = to_tensor(arr, dtype=mnp.int32)

    match_array(mnp_add(a, b).asnumpy(), onp_sum)
    match_array(mnp_add(b, c).asnumpy(), onp_sum)
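
# absolute is checked below across float16, float32, uint8 and bool inputs.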


def mnp_absolute(x):
    return mnp.absolute(x)


def onp_absolute(x):
    return onp.absolute(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_absolute():
    arr = rand_int(2, 3)

    a = to_tensor(arr, dtype=mnp.float16)
    b = to_tensor(arr, dtype=mnp.float32)
    c = to_tensor(arr, dtype=mnp.uint8)
    d = to_tensor(arr, dtype=mnp.bool_)

    match_array(mnp_absolute(a).asnumpy(), onp_absolute(a.asnumpy()))
    match_array(mnp_absolute(b).asnumpy(), onp_absolute(b.asnumpy()))
    match_array(mnp_absolute(c).asnumpy(), onp_absolute(c.asnumpy()))
    match_array(mnp_absolute(d).asnumpy(), onp_absolute(d.asnumpy()))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_deg2rad_rad2deg():
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp.deg2rad, onp.deg2rad, arr)
        match_res(mnp.rad2deg, onp.rad2deg, arr)


def mnp_ptp(x):
    a = mnp.ptp(x)
    b = mnp.ptp(x, keepdims=True)
    c = mnp.ptp(x, axis=(0, 1))
    d = mnp.ptp(x, axis=-1)
    return a, b, c, d


def onp_ptp(x):
    a = onp.ptp(x)
    b = onp.ptp(x, keepdims=True)
    c = onp.ptp(x, axis=(0, 1))
    d = onp.ptp(x, axis=-1)
    return a, b, c, d


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ptp():
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp_ptp, onp_ptp, arr)


def mnp_add_dtype(x1, x2):
    return mnp.add(x1, x2, dtype=mnp.float32)


def onp_add_dtype(x1, x2):
    return onp.add(x1, x2, dtype=onp.float32)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_dtype():
    x1 = rand_int(2, 3).astype('int32')
    x2 = rand_int(2, 3).astype('int32')
    arrs = (x1, x2)
    mnp_arrs = map(to_tensor, arrs)
    mnp_res = mnp_add_dtype(*mnp_arrs)
    onp_res = onp_add_dtype(*arrs)
    for actual, expected in zip(mnp_res, onp_res):
        assert actual.asnumpy().dtype == expected.dtype


def mnp_matmul(x1, x2):
    return mnp.matmul(x1, x2)


def onp_matmul(x1, x2):
    return onp.matmul(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_matmul():
    for scalar1 in test_case.scalars[1:]:
        for scalar2 in test_case.scalars[1:]:
            match_res(mnp_matmul, onp_matmul,
                      scalar1, scalar2)
    for i in range(8):
        match_res(mnp_matmul, onp_matmul,
                  test_case.core_broadcastables[2*i],
                  test_case.core_broadcastables[2*i + 1])
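
# The unary functions that follow (square, sqrt, reciprocal, the log and exp
# families) mostly reuse run_unary_test on the shared test_case arrays.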


def mnp_square(x):
    return mnp.square(x)


def onp_square(x):
    return onp.square(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_square():
    run_unary_test(mnp_square, onp_square, test_case)


def mnp_sqrt(x):
    return mnp.sqrt(x)


def onp_sqrt(x):
    return onp.sqrt(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sqrt():
    run_unary_test(mnp_sqrt, onp_sqrt, test_case)


def mnp_reciprocal(x):
    return mnp.reciprocal(x)


def onp_reciprocal(x):
    return onp.reciprocal(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_reciprocal():
    run_unary_test(mnp_reciprocal, onp_reciprocal, test_case)


def mnp_log(x):
    return mnp.log(x)


def onp_log(x):
    return onp.log(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log():
    run_unary_test(mnp.log, onp.log, test_case, error=1e-5)


def mnp_log1p(x):
    return mnp.log1p(x)


def onp_log1p(x):
    return onp.log1p(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log1p():
    run_unary_test(mnp_log1p, onp_log1p, test_case, error=1e-5)


def mnp_logaddexp(x1, x2):
    return mnp.logaddexp(x1, x2)


def onp_logaddexp(x1, x2):
    return onp.logaddexp(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logaddexp():
    test_cases = [
        onp.random.randint(1, 5, (2)).astype('float16'),
        onp.random.randint(1, 5, (3, 2)).astype('float16'),
        onp.random.randint(1, 5, (1, 3, 2)).astype('float16'),
        onp.random.randint(1, 5, (5, 6, 3, 2)).astype('float16')]
    for _, x1 in enumerate(test_cases):
        for _, x2 in enumerate(test_cases):
            expected = onp_logaddexp(x1, x2)
            actual = mnp_logaddexp(to_tensor(x1), to_tensor(x2))
            onp.testing.assert_almost_equal(actual.asnumpy().tolist(), expected.tolist(),
                                            decimal=2)


def mnp_log2(x):
    return mnp.log2(x)


def onp_log2(x):
    return onp.log2(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log2():
    run_unary_test(mnp_log2, onp_log2, test_case, error=1e-5)


def mnp_logaddexp2(x1, x2):
    return mnp.logaddexp2(x1, x2)


def onp_logaddexp2(x1, x2):
    return onp.logaddexp2(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logaddexp2():
    test_cases = [
        onp.random.randint(1, 5, (2)).astype('float16'),
        onp.random.randint(1, 5, (3, 2)).astype('float16'),
        onp.random.randint(1, 5, (1, 3, 2)).astype('float16'),
        onp.random.randint(1, 5, (5, 6, 3, 2)).astype('float16')]
    for _, x1 in enumerate(test_cases):
        for _, x2 in enumerate(test_cases):
            expected = onp_logaddexp2(x1, x2)
            actual = mnp_logaddexp2(to_tensor(x1), to_tensor(x2))
            onp.testing.assert_almost_equal(actual.asnumpy().tolist(), expected.tolist(),
                                            decimal=2)


def mnp_log10(x):
    return mnp.log10(x)


def onp_log10(x):
    return onp.log10(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log10():
    run_unary_test(mnp_log10, onp_log10, test_case, error=1e-5)


def mnp_maximum(x1, x2):
    return mnp.maximum(x1, x2)


def onp_maximum(x1, x2):
    return onp.maximum(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maximum():
    run_binop_test(mnp_maximum, onp_maximum, test_case)

    x = onp.random.randint(-10, 10, 20).astype(onp.float32)
    y = onp.random.randint(-10, 10, 20).astype(onp.float32)
    x[onp.random.randint(0, 10, 3)] = onp.nan
    y[onp.random.randint(0, 10, 3)] = onp.nan
    x[onp.random.randint(0, 10, 3)] = onp.NINF
    y[onp.random.randint(0, 10, 3)] = onp.NINF
    x[onp.random.randint(0, 10, 3)] = onp.PINF
    y[onp.random.randint(0, 10, 3)] = onp.PINF
    match_res(mnp_maximum, onp_maximum, x, y)
    match_res(mnp_maximum, onp_maximum, y, x)
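
# clip is tested with tensor bounds, plain scalar bounds, and an explicit
# output dtype.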


def mnp_clip(x):
    a = mnp.clip(x, to_tensor(10.0), to_tensor([2,]))
    b = mnp.clip(x, 0, 1)
    c = mnp.clip(x, to_tensor(0), to_tensor(10), dtype=mnp.float32)
    return a, b, c


def onp_clip(x):
    a = onp.clip(x, onp.asarray(10.0), onp.asarray([2,]))
    b = onp.clip(x, 0, 1)
    c = onp.clip(x, onp.asarray(0), onp.asarray(10), dtype=onp.float32)
    return a, b, c


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_clip():
    run_unary_test(mnp_clip, onp_clip, test_case)


def mnp_amax(x, mask):
    a = mnp.amax(x)
    b = mnp.amax(x, axis=-3)
    c = mnp.amax(x, keepdims=True)
    d = mnp.amax(x, initial=3)
    e = mnp.amax(x, axis=(0, 1), keepdims=True)
    f = mnp.amax(x, initial=4, where=mask)
    g = mnp.amax(x, initial=5, where=mask, keepdims=True)
    h = mnp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
    return a, b, c, d, e, f, g, h


def onp_amax(x, mask):
    a = onp.amax(x)
    b = onp.amax(x, axis=-3)
    c = onp.amax(x, keepdims=True)
    d = onp.amax(x, initial=3)
    e = onp.amax(x, axis=(0, 1), keepdims=True)
    f = onp.amax(x, initial=4, where=mask)
    g = onp.amax(x, initial=5, where=mask, keepdims=True)
    h = onp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
    return a, b, c, d, e, f, g, h


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amax():
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amax, onp_amax, (a, mask))
    match_res(mnp.amax, onp.amax, rand_int())


def mnp_amin(x, mask):
    a = mnp.amin(x)
    b = mnp.amin(x, axis=-3)
    c = mnp.amin(x, keepdims=True)
    d = mnp.amin(x, initial=-1)
    e = mnp.amin(x, axis=(0, 1), keepdims=True)
    f = mnp.amin(x, initial=-2)
    g = mnp.amin(x, initial=-3, keepdims=True)
    h = mnp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
    return a, b, c, d, e, f, g, h


def onp_amin(x, mask):
    a = onp.amin(x)
    b = onp.amin(x, axis=-3)
    c = onp.amin(x, keepdims=True)
    d = onp.amin(x, initial=-1)
    e = onp.amin(x, axis=(0, 1), keepdims=True)
    f = onp.amin(x, initial=-2)
    g = onp.amin(x, initial=-3, keepdims=True)
    h = onp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
    return a, b, c, d, e, f, g, h


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amin():
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amin, onp_amin, (a, mask))
    match_res(mnp.amin, onp.amin, rand_int())


def mnp_hypot(x1, x2):
    return mnp.hypot(x1, x2)


def onp_hypot(x1, x2):
    return onp.hypot(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_hypot():
    run_binop_test(mnp_hypot, onp_hypot, test_case)


def mnp_heaviside(x1, x2):
    return mnp.heaviside(x1, x2)


def onp_heaviside(x1, x2):
    return onp.heaviside(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_heaviside():
    broadcastables = test_case.broadcastables
    for b1 in broadcastables:
        for b2 in broadcastables:
            b = onp.subtract(b1, b2)
            match_res(mnp_heaviside, onp_heaviside, b, b1)
            match_res(mnp_heaviside, onp_heaviside, b, b2)


def mnp_floor(x):
    return mnp.floor(x)


def onp_floor(x):
    return onp.floor(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor():
    run_unary_test(mnp_floor, onp_floor, test_case)
    x = rand_int(2, 3) * onp.random.rand(2, 3)
    match_res(mnp_floor, onp_floor, x)
    match_res(mnp_floor, onp_floor, -x)


def mnp_floor_divide(x, y):
    return mnp.floor_divide(x, y)


def onp_floor_divide(x, y):
    return onp.floor_divide(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor_divide():
    run_binop_test(mnp_floor_divide, onp_floor_divide, test_case)


def mnp_remainder(x, y):
    return mnp.remainder(x, y)


def onp_remainder(x, y):
    return onp.remainder(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_remainder():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_remainder, onp_remainder, x, y)


def mnp_mod(x, y):
    return mnp.mod(x, y)


def onp_mod(x, y):
    return onp.mod(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_mod():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_mod, onp_mod, x, y)


def mnp_fmod(x, y):
    return mnp.fmod(x, y)


def onp_fmod(x, y):
    return onp.fmod(x, y)


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fmod():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_fmod, onp_fmod, x, y)


def mnp_fix(x):
    return mnp.fix(x)


def onp_fix(x):
    return onp.fix(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fix():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    floats = onp.divide(onp.subtract(x, y), y)
    match_res(mnp_fix, onp_fix, floats, error=1e-5)


def mnp_trunc(x):
    return mnp.trunc(x)


def onp_trunc(x):
    return onp.trunc(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_trunc():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    floats = onp.divide(onp.subtract(x, y), y)
    match_res(mnp_trunc, onp_trunc, floats, error=1e-5)


def mnp_exp(x):
    return mnp.exp(x)


def onp_exp(x):
    return onp.exp(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exp():
    run_unary_test(mnp_exp, onp_exp, test_case, error=5)


def mnp_expm1(x):
    return mnp.expm1(x)


def onp_expm1(x):
    return onp.expm1(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_expm1():
    run_unary_test(mnp_expm1, onp_expm1, test_case, error=5)


def mnp_exp2(x):
    return mnp.exp2(x)


def onp_exp2(x):
    return onp.exp2(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exp2():
    run_unary_test(mnp_exp2, onp_exp2, test_case, error=5)


def mnp_kron(x, y):
    return mnp.kron(x, y)


def onp_kron(x, y):
    return onp.kron(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_kron():
    run_binop_test(mnp_kron, onp_kron, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cross():
    x = onp.arange(8).reshape(2, 2, 1, 2)
    y = onp.arange(4).reshape(1, 2, 2)
    match_res(mnp.cross, onp.cross, x, y)
    match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2)
    match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2, axis=1)
    x = onp.arange(18).reshape(2, 3, 1, 3)
    y = onp.arange(9).reshape(1, 3, 3)
    match_res(mnp.cross, onp.cross, x, y)
    match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2)
    match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2, axis=1)


def mnp_ceil(x):
    return mnp.ceil(x)


def onp_ceil(x):
    return onp.ceil(x)


@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ceil():
    run_unary_test(mnp_ceil, onp_ceil, test_case)


def mnp_positive(x):
    return mnp.positive(x)


def onp_positive(x):
    return onp.positive(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_positive():
    arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
    onp_pos = onp_positive(arr)
    mnp_pos = mnp_positive(to_tensor(arr))
    match_array(mnp_pos.asnumpy(), onp_pos)


def mnp_negative(x):
    return mnp.negative(x)


def onp_negative(x):
    return onp.negative(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_negative():
    arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
    onp_neg = onp_negative(arr)
    mnp_neg = mnp_negative(to_tensor(arr))
    match_array(mnp_neg.asnumpy(), onp_neg, 1e-5)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cumsum():
    x = mnp.ones((16, 16), dtype="bool")
    match_array(mnp.cumsum(x).asnumpy(), onp.cumsum(x.asnumpy()))
    match_array(mnp.cumsum(x, axis=0).asnumpy(),
                onp.cumsum(x.asnumpy(), axis=0))
    match_meta(mnp.cumsum(x).asnumpy(), onp.cumsum(x.asnumpy()))

    x = rand_int(3, 4, 5)
    match_array(mnp.cumsum(to_tensor(x), dtype="bool").asnumpy(),
                onp.cumsum(x, dtype="bool"))
    match_array(mnp.cumsum(to_tensor(x), axis=-1).asnumpy(),
                onp.cumsum(x, axis=-1))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_promote_types():
    assert mnp.promote_types(mnp.int32, mnp.bool_) == mnp.int32
    assert mnp.promote_types(int, mnp.bool_) == mnp.int32
    assert mnp.promote_types("float32", mnp.int64) == mnp.float32
    assert mnp.promote_types(mnp.int64, mnp.float16) == mnp.float16
    assert mnp.promote_types(int, float) == mnp.float32
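
# diff and ediff1d are exercised together: diff with varying order, axis and
# prepend/append arguments, ediff1d with to_begin/to_end arguments.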


def mnp_diff(input_tensor):
    a = mnp.diff(input_tensor, 2, append=3.0)
    b = mnp.diff(input_tensor, 4, prepend=6, axis=-2)
    c = mnp.diff(input_tensor, 0, append=3.0, axis=-1)
    d = mnp.diff(input_tensor, 1, prepend=input_tensor)
    e = mnp.ediff1d(input_tensor, to_end=input_tensor)
    f = mnp.ediff1d(input_tensor)
    g = mnp.ediff1d(input_tensor, to_begin=3)
    return a, b, c, d, e, f, g


def onp_diff(input_array):
    a = onp.diff(input_array, 2, append=3.0)
    b = onp.diff(input_array, 4, prepend=6, axis=-2)
    c = onp.diff(input_array, 0, append=3.0, axis=-1)
    d = onp.diff(input_array, 1, prepend=input_array)
    e = onp.ediff1d(input_array, to_end=input_array)
    f = onp.ediff1d(input_array)
    g = onp.ediff1d(input_array, to_begin=3)
    return a, b, c, d, e, f, g


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_diff():
    arr = rand_int(3, 4, 5)
    match_res(mnp_diff, onp_diff, arr)
    arr = rand_int(1, 4, 6, 3)
    match_res(mnp_diff, onp_diff, arr)


def mnp_sin(x):
    return mnp.sin(x)


def onp_sin(x):
    return onp.sin(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sin():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_sin(arr)
    actual = mnp_sin(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)


def mnp_cos(x):
    return mnp.cos(x)


def onp_cos(x):
    return onp.cos(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cos():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_cos(arr)
    actual = mnp_cos(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)


def mnp_tan(x):
    return mnp.tan(x)


def onp_tan(x):
    return onp.tan(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tan():
    arr = onp.array([-0.75, -0.5, 0, 0.5, 0.75]).astype('float32')
    expect = onp_tan(arr)
    actual = mnp_tan(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)


def mnp_arcsin(x):
    return mnp.arcsin(x)


def onp_arcsin(x):
    return onp.arcsin(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arcsin():
    arr = onp.random.uniform(-1, 1, 12).astype('float32')
    onp_asin = onp_arcsin(arr)
    mnp_asin = mnp_arcsin(to_tensor(arr))
    match_array(mnp_asin.asnumpy(), onp_asin, error=3)


def mnp_arccos(x):
    return mnp.arccos(x)


def onp_arccos(x):
    return onp.arccos(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arccos():
    arr = onp.random.uniform(-1, 1, 12).astype('float32')
    onp_acos = onp_arccos(arr)
    mnp_acos = mnp_arccos(to_tensor(arr))
    match_array(mnp_acos.asnumpy(), onp_acos, error=2)


def mnp_arctan(x):
    return mnp.arctan(x)


def onp_arctan(x):
    return onp.arctan(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arctan():
    arr = onp.random.uniform(-1, 1, 12).astype('float32')
    onp_atan = onp_arctan(arr)
    mnp_atan = mnp_arctan(to_tensor(arr))
    match_array(mnp_atan.asnumpy(), onp_atan, error=5)


def mnp_sinh(x):
    return mnp.sinh(x)


def onp_sinh(x):
    return onp.sinh(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sinh():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_sinh(arr)
    actual = mnp_sinh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)


def mnp_cosh(x):
    return mnp.cosh(x)


def onp_cosh(x):
    return onp.cosh(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cosh():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_cosh(arr)
    actual = mnp_cosh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)


def mnp_tanh(x):
    return mnp.tanh(x)


def onp_tanh(x):
    return onp.tanh(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tanh():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_tanh(arr)
    actual = mnp_tanh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)


def mnp_arcsinh(x):
    return mnp.arcsinh(x)


def onp_arcsinh(x):
    return onp.arcsinh(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arcsinh():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_arcsinh(arr)
    actual = mnp_arcsinh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)


def mnp_arccosh(x):
    return mnp.arccosh(x)


def onp_arccosh(x):
    return onp.arccosh(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arccosh():
    arr = onp.random.randint(1, 100, size=(2, 3)).astype('float32')
    expect = onp_arccosh(arr)
    actual = mnp_arccosh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)


def mnp_arctanh(x):
    return mnp.arctanh(x)


def onp_arctanh(x):
    return onp.arctanh(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arctanh():
    arr = onp.random.uniform(-0.9, 1, 10).astype('float32')
    expect = onp_arctanh(arr)
    actual = mnp_arctanh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)


def mnp_arctan2(x, y):
    return mnp.arctan2(x, y)


def onp_arctan2(x, y):
    return onp.arctan2(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arctan2():
    run_binop_test(mnp_arctan2, onp_arctan2, test_case, error=5)
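
# convolve is compared against numpy in 'full', 'same' and 'valid' modes,
# with scalar, list and tensor inputs.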
  1276. def mnp_convolve(mode):
  1277. a = mnp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
  1278. b = mnp.convolve([1, 2, 3, 4, 5], [2, 3], mode=mode)
  1279. c = mnp.convolve([1, 2], [2, 5, 10], mode=mode)
  1280. d = mnp.convolve(mnp.array([1, 2, 3, 4, 5]), mnp.array([1, 2, 3, 4, 5]), mode=mode)
  1281. e = mnp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
  1282. return a, b, c, d, e
  1283. def onp_convolve(mode):
  1284. a = onp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
  1285. b = onp.convolve([1, 2, 3, 4, 5], [2, 3], mode=mode)
  1286. c = onp.convolve([1, 2], [2, 5, 10], mode=mode)
  1287. d = onp.convolve(onp.array([1, 2, 3, 4, 5]), onp.array([1, 2, 3, 4, 5]), mode=mode)
  1288. e = onp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
  1289. return a, b, c, d, e
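# For reference, a minimal sketch (not part of the original suite) of the
# scalar-kernel case used in the helpers above: NumPy promotes the scalar 2 to
# a length-1 kernel, so every mode returns the input scaled by 2. The helper
# name is illustrative and deliberately not pytest-collected.
def _convolve_scalar_kernel_example():
    out = onp.convolve([1, 2, 3, 4, 5], 2, mode='full')
    assert list(out) == [2, 4, 6, 8, 10]
    return out
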
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_convolve():
    for mode in ['full', 'same', 'valid']:
        mnp_res = mnp_convolve(mode)
        onp_res = onp_convolve(mode)
        match_all_arrays(mnp_res, onp_res)

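# test_cov below exercises both weighting schemes accepted by cov: fweights are
# integer frequency counts per observation, while aweights are relative
# observation weights (matching numpy.cov). A minimal sketch of how the two
# enter a call, using a made-up 2x3 sample; the helper is illustrative only and
# deliberately not pytest-collected.
def _cov_weights_example():
    sample = onp.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])  # 2 variables, 3 observations
    freq = [1, 2, 1]       # integer frequency count per observation
    rel = [0.2, 0.5, 0.3]  # relative observation weights
    return onp.cov(sample, fweights=freq, aweights=rel)
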
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cov():
    x = onp.random.random((3, 4)).tolist()
    mnp_res = mnp.cov(x)
    onp_res = onp.cov(x)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x[0])
    onp_res = onp.cov(x[0])
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    w1 = [0, 1, 2, 3]
    w2 = [4, 5, 6, 7]
    mnp_res = mnp.cov(x, fweights=w1)
    onp_res = onp.cov(x, fweights=w1)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, aweights=w2)
    onp_res = onp.cov(x, aweights=w2)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, fweights=w1, aweights=w2)
    onp_res = onp.cov(x, fweights=w1, aweights=w2)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, fweights=w1, aweights=w2, ddof=3)
    onp_res = onp.cov(x, fweights=w1, aweights=w2, ddof=3)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, fweights=w1, aweights=w2, bias=True)
    onp_res = onp.cov(x, fweights=w1, aweights=w2, bias=True)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, fweights=w1[0:3], aweights=w2[0:3], rowvar=False, bias=True)
    onp_res = onp.cov(x, fweights=w1[0:3], aweights=w2[0:3], rowvar=False, bias=True)
    match_all_arrays(mnp_res, onp_res, error=1e-5)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_trapz():
    y = rand_int(2, 3, 4, 5)
    match_res(mnp.trapz, onp.trapz, y)
    match_res(mnp.trapz, onp.trapz, y, x=[-5, -3, 0, 7, 10])
    match_res(mnp.trapz, onp.trapz, y, dx=2, axis=3)
    match_res(mnp.trapz, onp.trapz, y, x=[1, 5, 6, 9], dx=3, axis=-2)

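# gcd/lcm follow the NumPy convention: results are non-negative even for
# negative inputs, and gcd(0, 0) == 0. The tests below cover negative values
# and zeros via arange(-12, 12) and arange(24).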
def mnp_gcd(x, y):
    return mnp.gcd(x, y)


def onp_gcd(x, y):
    return onp.gcd(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_gcd():
    x = onp.arange(-12, 12).reshape(2, 3, 4)
    y = onp.arange(24).reshape(2, 3, 4)
    match_res(mnp_gcd, onp_gcd, x, y)


def mnp_lcm(x, y):
    return mnp.lcm(x, y)


def onp_lcm(x, y):
    return onp.lcm(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_lcm():
    x = onp.arange(-12, 12).reshape(2, 3, 4)
    y = onp.arange(24).reshape(2, 3, 4)
    match_res(mnp_lcm, onp_lcm, x, y)

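# nansum treats NaN entries as zero and nanmean excludes them from the average;
# the helpers below cover the default call plus keepdims, single-axis and
# multi-axis reductions.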
def mnp_nansum(x):
    a = mnp.nansum(x)
    b = mnp.nansum(x, keepdims=True)
    c = mnp.nansum(x, axis=-2)
    d = mnp.nansum(x, axis=0, keepdims=True)
    e = mnp.nansum(x, axis=(-2, 3))
    f = mnp.nansum(x, axis=(-3, -1), keepdims=True)
    return a, b, c, d, e, f


def onp_nansum(x):
    a = onp.nansum(x)
    b = onp.nansum(x, keepdims=True)
    c = onp.nansum(x, axis=-2)
    d = onp.nansum(x, axis=0, keepdims=True)
    e = onp.nansum(x, axis=(-2, 3))
    f = onp.nansum(x, axis=(-3, -1), keepdims=True)
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nansum():
    x = rand_int(2, 3, 4, 5)
    x[0][2][1][3] = onp.nan
    x[1][0][2][4] = onp.nan
    x[1][1][1][1] = onp.nan
    run_multi_test(mnp_nansum, onp_nansum, (x,))
    match_res(mnp.nansum, onp.nansum, rand_int())


def mnp_nanmean(x):
    a = mnp.nanmean(x)
    b = mnp.nanmean(x, keepdims=True)
    c = mnp.nanmean(x, axis=-2)
    d = mnp.nanmean(x, axis=0, keepdims=True)
    e = mnp.nanmean(x, axis=(-2, 3))
    f = mnp.nanmean(x, axis=(-3, -1), keepdims=True)
    return a, b, c, d, e, f


def onp_nanmean(x):
    a = onp.nanmean(x)
    b = onp.nanmean(x, keepdims=True)
    c = onp.nanmean(x, axis=-2)
    d = onp.nanmean(x, axis=0, keepdims=True)
    e = onp.nanmean(x, axis=(-2, 3))
    f = onp.nanmean(x, axis=(-3, -1), keepdims=True)
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanmean():
    x = rand_int(2, 3, 4, 5)
    x[0][2][1][3] = onp.nan
    x[1][0][2][4] = onp.nan
    x[1][1][1][1] = onp.nan
    run_multi_test(mnp_nanmean, onp_nanmean, (x,))
    match_res(mnp.nanmean, onp.nanmean, rand_int())

def mnp_mean(*arrs):
    arr1 = arrs[0]
    arr2 = arrs[1]
    arr3 = arrs[2]
    a = mnp.mean(arr1)
    b = mnp.mean(arr2, keepdims=True)
    c = mnp.mean(arr3, keepdims=False)
    d = mnp.mean(arr2, axis=0, keepdims=True)
    e = mnp.mean(arr3, axis=(0, -1))
    f = mnp.mean(arr3, axis=-1, keepdims=True)
    return a, b, c, d, e, f


def onp_mean(*arrs):
    arr1 = arrs[0]
    arr2 = arrs[1]
    arr3 = arrs[2]
    a = onp.mean(arr1)
    b = onp.mean(arr2, keepdims=True)
    c = onp.mean(arr3, keepdims=False)
    d = onp.mean(arr2, axis=0, keepdims=True)
    e = onp.mean(arr3, axis=(0, -1))
    f = onp.mean(arr3, axis=-1, keepdims=True)
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_mean():
    run_multi_test(mnp_mean, onp_mean, test_case.arrs, error=3)
    run_multi_test(mnp_mean, onp_mean, test_case.expanded_arrs, error=3)
    run_multi_test(mnp_mean, onp_mean, test_case.scalars, error=3)

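# The remaining tests cover error handling: mnp.inner, mnp.add and mnp.mean are
# called with arguments expected to raise ValueError, and mnp.amax with a
# non-scalar `initial` is expected to raise TypeError.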
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_inner():
    with pytest.raises(ValueError):
        mnp.inner(to_tensor(test_case.arrs[0]),
                  to_tensor(test_case.arrs[1]))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_add():
    with pytest.raises(ValueError):
        mnp.add(to_tensor(test_case.arrs[1]), to_tensor(test_case.arrs[2]))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_mean():
    with pytest.raises(ValueError):
        mnp.mean(to_tensor(test_case.arrs[0]), (-1, 0))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_amax():
    with pytest.raises(TypeError):
        mnp.amax(mnp.array([[1, 2], [3, 4]]).astype(mnp.float32), initial=[1.0, 2.0])