
test_math_ops.py
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""unit tests for numpy math operations"""
import pytest
import numpy as onp
import mindspore.numpy as mnp
from .utils import rand_int, rand_bool, run_binop_test, run_unary_test, run_multi_test, \
    run_single_test, match_res, match_array, match_meta, match_all_arrays, to_tensor
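# Note: the helpers imported from .utils are used but not defined in this file.
# From their usage below, rand_int(*shape) and rand_bool(*shape) appear to build
# random integer and boolean ndarrays of the given shape, and to_tensor converts
# an ndarray into a MindSpore Tensor (optionally with an explicit dtype).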
class Cases():
    def __init__(self):
        self.arrs = [
            rand_int(2),
            rand_int(2, 3),
            rand_int(2, 3, 4),
        ]

        # scalars expanded across the 0th dimension
        self.scalars = [
            rand_int(),
            rand_int(1),
            rand_int(1, 1),
        ]

        # empty arrays
        self.empty_arrs = [
            rand_int(0),
            rand_int(4, 0),
            rand_int(2, 0, 2),
        ]

        # arrays of the same size expanded across the 0th dimension
        self.expanded_arrs = [
            rand_int(2, 3),
            rand_int(1, 2, 3),
            rand_int(1, 1, 2, 3),
        ]

        # arrays with last dimension aligned
        self.aligned_arrs = [
            rand_int(2, 3),
            rand_int(1, 4, 3),
            rand_int(5, 1, 2, 3),
            rand_int(4, 2, 1, 1, 3),
        ]

        # arrays which can be broadcast
        self.broadcastables = [
            rand_int(5),
            rand_int(6, 1),
            rand_int(7, 1, 5),
        ]

        # boolean arrays which can be broadcast
        self.bool_broadcastables = [
            rand_bool(),
            rand_bool(1),
            rand_bool(5),
            rand_bool(6, 1),
            rand_bool(7, 1, 5),
            rand_bool(8, 1, 6, 1),
        ]

        # core dimension 0 is matched for each
        # pair of array[i] and array[i + 1]
        self.core_broadcastables = [
            rand_int(3),
            rand_int(3),
            rand_int(6),
            rand_int(6, 4),
            rand_int(5, 2),
            rand_int(2),
            rand_int(2, 9),
            rand_int(9, 8),
            rand_int(6),
            rand_int(2, 6, 5),
            rand_int(9, 2, 7),
            rand_int(7),
            rand_int(5, 2, 4),
            rand_int(6, 1, 4, 9),
            rand_int(7, 1, 5, 3, 2),
            rand_int(8, 1, 6, 1, 2, 9),
        ]

        # arrays with dimensions of size 1
        self.nested_arrs = [
            rand_int(1),
            rand_int(1, 2),
            rand_int(3, 1, 8),
            rand_int(1, 3, 9, 1),
        ]

test_case = Cases()
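# A minimal sketch of the comparison pattern the run_*_test/match_* helpers follow,
# as far as this file shows: evaluate an mnp_* wrapper on Tensors and the matching
# onp_* wrapper on raw ndarrays, then compare the results. This is illustrative
# only and is not the implementation in .utils; `decimals` is a hypothetical
# parameter name used here instead of the helpers' `error` argument.
def _sketch_compare(mnp_fn, onp_fn, *arrs, decimals=7):
    actual = mnp_fn(*map(to_tensor, arrs)).asnumpy()
    expected = onp_fn(*arrs)
    onp.testing.assert_almost_equal(actual.tolist(), expected.tolist(), decimal=decimals)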
def mnp_add(x1, x2):
    return mnp.add(x1, x2)

def onp_add(x1, x2):
    return onp.add(x1, x2)

def mnp_subtract(x1, x2):
    return mnp.subtract(x1, x2)

def onp_subtract(x1, x2):
    return onp.subtract(x1, x2)
def mnp_multiply(x1, x2):
    return mnp.multiply(x1, x2)

def onp_multiply(x1, x2):
    return onp.multiply(x1, x2)
def mnp_divide(x1, x2):
    return mnp.divide(x1, x2)

def onp_divide(x1, x2):
    return onp.divide(x1, x2)

def mnp_true_divide(x1, x2):
    return mnp.true_divide(x1, x2)

def onp_true_divide(x1, x2):
    return onp.true_divide(x1, x2)

def mnp_power(x1, x2):
    return mnp.power(x1, x2)

def onp_power(x1, x2):
    return onp.power(x1, x2)

def mnp_float_power(x1, x2):
    return mnp.float_power(x1, x2)

def onp_float_power(x1, x2):
    return onp.float_power(x1, x2)

def mnp_minimum(a, b):
    return mnp.minimum(a, b)

def onp_minimum(a, b):
    return onp.minimum(a, b)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add():
    run_binop_test(mnp_add, onp_add, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_subtract():
    run_binop_test(mnp_subtract, onp_subtract, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multiply():
    run_binop_test(mnp_multiply, onp_multiply, test_case)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_divide():
    run_binop_test(mnp_divide, onp_divide, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_true_divide():
    run_binop_test(mnp_true_divide, onp_true_divide, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_power():
    run_binop_test(mnp_power, onp_power, test_case, error=1e-5)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_float_power():
    run_binop_test(mnp_float_power, onp_float_power, test_case, error=1e-5)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_minimum():
    run_binop_test(mnp_minimum, onp_minimum, test_case)

def mnp_tensordot(x, y):
    a = mnp.tensordot(x, y)
    b = mnp.tensordot(x, y, axes=0)
    c = mnp.tensordot(x, y, axes=1)
    d = mnp.tensordot(x, y, axes=2)
    e = mnp.tensordot(x, y, axes=(3, 0))
    f = mnp.tensordot(x, y, axes=[2, 1])
    g = mnp.tensordot(x, y, axes=((2, 3), (0, 1)))
    h = mnp.tensordot(x, y, axes=[[3, 2], [1, 0]])
    return a, b, c, d, e, f, g, h

def onp_tensordot(x, y):
    a = onp.tensordot(x, y)
    b = onp.tensordot(x, y, axes=0)
    c = onp.tensordot(x, y, axes=1)
    d = onp.tensordot(x, y, axes=2)
    e = onp.tensordot(x, y, axes=(3, 0))
    f = onp.tensordot(x, y, axes=[2, 1])
    g = onp.tensordot(x, y, axes=((2, 3), (0, 1)))
    h = onp.tensordot(x, y, axes=[[3, 2], [1, 0]])
    return a, b, c, d, e, f, g, h

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensordot():
    x = rand_int(4, 2, 7, 7)
    y = rand_int(7, 7, 6)
    run_multi_test(mnp_tensordot, onp_tensordot, (x, y))
def mnp_std(x):
    a = mnp.std(x)
    b = mnp.std(x, axis=None)
    c = mnp.std(x, axis=0)
    d = mnp.std(x, axis=1)
    e = mnp.std(x, axis=(-1, 1))
    f = mnp.std(x, axis=(0, 1, 2))
    g = mnp.std(x, axis=None, ddof=1, keepdims=True)
    h = mnp.std(x, axis=0, ddof=1, keepdims=True)
    i = mnp.std(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i

def onp_std(x):
    a = onp.std(x)
    b = onp.std(x, axis=None)
    c = onp.std(x, axis=0)
    d = onp.std(x, axis=1)
    e = onp.std(x, axis=(-1, 1))
    f = onp.std(x, axis=(0, 1, 2))
    g = onp.std(x, axis=None, ddof=1, keepdims=True)
    h = onp.std(x, axis=0, ddof=1, keepdims=True)
    i = onp.std(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_std():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_std, onp_std, arr1, error=1e-5)
    run_single_test(mnp_std, onp_std, arr2, error=1e-5)

def mnp_nanstd(x):
    a = mnp.nanstd(x)
    b = mnp.nanstd(x, axis=None)
    c = mnp.nanstd(x, axis=0)
    d = mnp.nanstd(x, axis=1)
    e = mnp.nanstd(x, axis=(-1, 1))
    f = mnp.nanstd(x, axis=(0, 1, 2))
    g = mnp.nanstd(x, axis=None, ddof=1, keepdims=True)
    h = mnp.nanstd(x, axis=0, ddof=1, keepdims=True)
    i = mnp.nanstd(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i

def onp_nanstd(x):
    a = onp.nanstd(x)
    b = onp.nanstd(x, axis=None)
    c = onp.nanstd(x, axis=0)
    d = onp.nanstd(x, axis=1)
    e = onp.nanstd(x, axis=(-1, 1))
    f = onp.nanstd(x, axis=(0, 1, 2))
    g = onp.nanstd(x, axis=None, ddof=1, keepdims=True)
    h = onp.nanstd(x, axis=0, ddof=1, keepdims=True)
    i = onp.nanstd(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanstd():
    arr1 = rand_int(2, 3, 4, 5)
    arr1[0][2][1][3] = onp.nan
    arr1[1][0][2][4] = onp.nan
    arr1[1][1][1][1] = onp.nan
    arr2 = rand_int(4, 5, 4, 3, 3)
    arr2[3][1][2][1][0] = onp.nan
    arr2[1][1][1][1][1] = onp.nan
    arr2[0][4][3][0][2] = onp.nan
    run_single_test(mnp_nanstd, onp_nanstd, arr1, error=1e-5)
    run_single_test(mnp_nanstd, onp_nanstd, arr2, error=1e-5)

def mnp_var(x):
    a = mnp.var(x)
    b = mnp.var(x, axis=0)
    c = mnp.var(x, axis=(0))
    d = mnp.var(x, axis=(0, 1, 2))
    e = mnp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e

def onp_var(x):
    a = onp.var(x)
    b = onp.var(x, axis=0)
    c = onp.var(x, axis=(0))
    d = onp.var(x, axis=(0, 1, 2))
    e = onp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_var():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_var, onp_var, arr1, error=1e-5)
    run_single_test(mnp_var, onp_var, arr2, error=1e-5)
def mnp_nanvar(x):
    a = mnp.nanvar(x)
    b = mnp.nanvar(x, axis=0)
    c = mnp.nanvar(x, axis=(0))
    d = mnp.nanvar(x, axis=(0, 1, 2))
    e = mnp.nanvar(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e

def onp_nanvar(x):
    a = onp.nanvar(x)
    b = onp.nanvar(x, axis=0)
    c = onp.nanvar(x, axis=(0))
    d = onp.nanvar(x, axis=(0, 1, 2))
    e = onp.nanvar(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanvar():
    arr1 = rand_int(2, 3, 4, 5)
    arr1[0][2][1][3] = onp.nan
    arr1[1][0][2][4] = onp.nan
    arr1[1][1][1][1] = onp.nan
    arr2 = rand_int(4, 5, 4, 3, 3)
    arr2[3][1][2][1][0] = onp.nan
    arr2[1][1][1][1][1] = onp.nan
    arr2[0][4][3][0][2] = onp.nan
    run_single_test(mnp_nanvar, onp_nanvar, arr1, error=1e-5)
    run_single_test(mnp_nanvar, onp_nanvar, arr2, error=1e-5)

def mnp_average(x):
    a = mnp.average(x)
    b = mnp.average(x, axis=None)
    c = mnp.average(x, axis=0)
    d = mnp.average(x, axis=1)
    e = mnp.average(x, axis=(-2, 1))
    f = mnp.average(x, axis=(0, 1, 2, 3))
    g = mnp.average(x, axis=None, weights=x)
    h = mnp.average(x, axis=0, weights=x)
    i = mnp.average(x, axis=(1, 2, 3), weights=x)
    return a, b, c, d, e, f, g, h, i

def onp_average(x):
    a = onp.average(x)
    b = onp.average(x, axis=None)
    c = onp.average(x, axis=0)
    d = onp.average(x, axis=1)
    e = onp.average(x, axis=(-2, 1))
    f = onp.average(x, axis=(0, 1, 2, 3))
    g = onp.average(x, axis=None, weights=x)
    h = onp.average(x, axis=0, weights=x)
    i = onp.average(x, axis=(1, 2, 3), weights=x)
    return a, b, c, d, e, f, g, h, i

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_average():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 1, 3, 1)
    run_single_test(mnp_average, onp_average, arr1, error=1e-5)
    run_single_test(mnp_average, onp_average, arr2, error=1e-5)

def mnp_count_nonzero(x):
    a = mnp.count_nonzero(x)
    b = mnp.count_nonzero(x, axis=None)
    c = mnp.count_nonzero(x, axis=0)
    d = mnp.count_nonzero(x, axis=1)
    e = mnp.count_nonzero(x, axis=(-2, 1))
    f = mnp.count_nonzero(x, axis=(0, 1, 2, 3))
    return a, b, c, d, e, f

def onp_count_nonzero(x):
    a = onp.count_nonzero(x)
    b = onp.count_nonzero(x, axis=None)
    c = onp.count_nonzero(x, axis=0)
    d = onp.count_nonzero(x, axis=1)
    e = onp.count_nonzero(x, axis=(-2, 1))
    f = onp.count_nonzero(x, axis=(0, 1, 2, 3))
    return a, b, c, d, e, f

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_count_nonzero():
    # minus 5 to make some values below zero
    arr1 = rand_int(2, 3, 4, 5) - 5
    arr2 = rand_int(4, 5, 4, 3, 3) - 5
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr1)
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr2)

def mnp_inner(a, b):
    return mnp.inner(a, b)

def onp_inner(a, b):
    return onp.inner(a, b)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_inner():
    for arr1 in test_case.aligned_arrs:
        for arr2 in test_case.aligned_arrs:
            match_res(mnp_inner, onp_inner, arr1, arr2)

    for scalar1 in test_case.scalars:
        for scalar2 in test_case.scalars:
            match_res(mnp_inner, onp_inner,
                      scalar1, scalar2)

def mnp_dot(a, b):
    return mnp.dot(a, b)

def onp_dot(a, b):
    return onp.dot(a, b)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dot():
    # test case (1D, 1D)
    match_res(mnp_dot, onp_dot, rand_int(3), rand_int(3))

    # test case (2D, 2D)
    match_res(mnp_dot, onp_dot, rand_int(4, 7), rand_int(7, 2))

    # test case (0D, _) (_, 0D)
    match_res(mnp_dot, onp_dot, rand_int(), rand_int(1, 9, 3))
    match_res(mnp_dot, onp_dot, rand_int(8, 5, 6, 3), rand_int())

    # test case (ND, 1D)
    match_res(mnp_dot, onp_dot, rand_int(2, 4, 5), rand_int(5))

    # test case (ND, MD)
    match_res(mnp_dot, onp_dot, rand_int(5, 4, 1, 8), rand_int(8, 3))

    for i in range(8):
        match_res(mnp_dot, onp_dot,
                  test_case.core_broadcastables[2*i], test_case.core_broadcastables[2*i + 1])
def mnp_outer(a, b):
    return mnp.outer(a, b)

def onp_outer(a, b):
    return onp.outer(a, b)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_outer():
    run_binop_test(mnp_outer, onp_outer, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_type_promotion():
    arr = rand_int(2, 3)
    onp_sum = onp_add(arr, arr)

    a = to_tensor(arr, dtype=mnp.float16)
    b = to_tensor(arr, dtype=mnp.float32)
    c = to_tensor(arr, dtype=mnp.int32)

    match_array(mnp_add(a, b).asnumpy(), onp_sum)
    match_array(mnp_add(b, c).asnumpy(), onp_sum)

def mnp_absolute(x):
    return mnp.absolute(x)

def onp_absolute(x):
    return onp.absolute(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_absolute():
    arr = rand_int(2, 3)

    a = to_tensor(arr, dtype=mnp.float16)
    b = to_tensor(arr, dtype=mnp.float32)
    c = to_tensor(arr, dtype=mnp.uint8)
    d = to_tensor(arr, dtype=mnp.bool_)

    match_array(mnp_absolute(a).asnumpy(), onp_absolute(a.asnumpy()))
    match_array(mnp_absolute(b).asnumpy(), onp_absolute(b.asnumpy()))
    match_array(mnp_absolute(c).asnumpy(), onp_absolute(c.asnumpy()))
    match_array(mnp_absolute(d).asnumpy(), onp_absolute(d.asnumpy()))

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_deg2rad_rad2deg():
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp.deg2rad, onp.deg2rad, arr)
        match_res(mnp.rad2deg, onp.rad2deg, arr)

def mnp_ptp(x):
    a = mnp.ptp(x)
    b = mnp.ptp(x, keepdims=True)
    c = mnp.ptp(x, axis=(0, 1))
    d = mnp.ptp(x, axis=-1)
    return a, b, c, d

def onp_ptp(x):
    a = onp.ptp(x)
    b = onp.ptp(x, keepdims=True)
    c = onp.ptp(x, axis=(0, 1))
    d = onp.ptp(x, axis=-1)
    return a, b, c, d

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ptp():
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp_ptp, onp_ptp, arr)

def mnp_add_dtype(x1, x2):
    return mnp.add(x1, x2, dtype=mnp.float16)

def onp_add_dtype(x1, x2):
    return onp.add(x1, x2, dtype=onp.float16)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_dtype():
    x1 = rand_int(2, 3).astype('int32')
    x2 = rand_int(2, 3).astype('int32')
    arrs = (x1, x2)
    mnp_arrs = map(to_tensor, arrs)
    mnp_res = mnp_add_dtype(*mnp_arrs)
    onp_res = onp_add_dtype(*arrs)
    for actual, expected in zip(mnp_res, onp_res):
        assert actual.asnumpy().dtype == expected.dtype

def mnp_matmul(x1, x2):
    return mnp.matmul(x1, x2)

def onp_matmul(x1, x2):
    return onp.matmul(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_matmul():
    for scalar1 in test_case.scalars[1:]:
        for scalar2 in test_case.scalars[1:]:
            match_res(mnp_matmul, onp_matmul,
                      scalar1, scalar2)
    for i in range(8):
        match_res(mnp_matmul, onp_matmul,
                  test_case.core_broadcastables[2*i],
                  test_case.core_broadcastables[2*i + 1])

def mnp_square(x):
    return mnp.square(x)

def onp_square(x):
    return onp.square(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_square():
    run_unary_test(mnp_square, onp_square, test_case)
def mnp_sqrt(x):
    return mnp.sqrt(x)

def onp_sqrt(x):
    return onp.sqrt(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sqrt():
    run_unary_test(mnp_sqrt, onp_sqrt, test_case)

def mnp_reciprocal(x):
    return mnp.reciprocal(x)

def onp_reciprocal(x):
    return onp.reciprocal(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_reciprocal():
    run_unary_test(mnp_reciprocal, onp_reciprocal, test_case)

def mnp_log(x):
    return mnp.log(x)

def onp_log(x):
    return onp.log(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log():
    run_unary_test(mnp.log, onp.log, test_case, error=1e-5)

def mnp_log1p(x):
    return mnp.log1p(x)

def onp_log1p(x):
    return onp.log1p(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log1p():
    run_unary_test(mnp_log1p, onp_log1p, test_case, error=1e-5)

def mnp_logaddexp(x1, x2):
    return mnp.logaddexp(x1, x2)

def onp_logaddexp(x1, x2):
    return onp.logaddexp(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logaddexp():
    test_cases = [
        onp.random.randint(1, 5, (2)).astype('float16'),
        onp.random.randint(1, 5, (3, 2)).astype('float16'),
        onp.random.randint(1, 5, (1, 3, 2)).astype('float16'),
        onp.random.randint(1, 5, (5, 6, 3, 2)).astype('float16')]
    for _, x1 in enumerate(test_cases):
        for _, x2 in enumerate(test_cases):
            expected = onp_logaddexp(x1, x2)
            actual = mnp_logaddexp(to_tensor(x1), to_tensor(x2))
            onp.testing.assert_almost_equal(actual.asnumpy().tolist(), expected.tolist(),
                                            decimal=2)

def mnp_log2(x):
    return mnp.log2(x)

def onp_log2(x):
    return onp.log2(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log2():
    run_unary_test(mnp_log2, onp_log2, test_case, error=1e-5)

def mnp_logaddexp2(x1, x2):
    return mnp.logaddexp2(x1, x2)

def onp_logaddexp2(x1, x2):
    return onp.logaddexp2(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_logaddexp2():
    test_cases = [
        onp.random.randint(1, 5, (2)).astype('float16'),
        onp.random.randint(1, 5, (3, 2)).astype('float16'),
        onp.random.randint(1, 5, (1, 3, 2)).astype('float16'),
        onp.random.randint(1, 5, (5, 6, 3, 2)).astype('float16')]
    for _, x1 in enumerate(test_cases):
        for _, x2 in enumerate(test_cases):
            expected = onp_logaddexp2(x1, x2)
            actual = mnp_logaddexp2(to_tensor(x1), to_tensor(x2))
            onp.testing.assert_almost_equal(actual.asnumpy().tolist(), expected.tolist(),
                                            decimal=2)

def mnp_log10(x):
    return mnp.log10(x)

def onp_log10(x):
    return onp.log10(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log10():
    run_unary_test(mnp_log10, onp_log10, test_case, error=1e-5)

def mnp_maximum(x1, x2):
    return mnp.maximum(x1, x2)

def onp_maximum(x1, x2):
    return onp.maximum(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maximum():
    run_binop_test(mnp_maximum, onp_maximum, test_case)
def mnp_clip(x):
    a = mnp.clip(x, to_tensor(10.0), to_tensor([2,]))
    b = mnp.clip(x, 0, 1)
    c = mnp.clip(x, to_tensor(0), to_tensor(10), dtype=mnp.float32)
    return a, b, c

def onp_clip(x):
    a = onp.clip(x, onp.asarray(10.0), onp.asarray([2,]))
    b = onp.clip(x, 0, 1)
    c = onp.clip(x, onp.asarray(0), onp.asarray(10), dtype=onp.float32)
    return a, b, c

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_clip():
    run_unary_test(mnp_clip, onp_clip, test_case)

def mnp_amax(x, mask):
    a = mnp.amax(x)
    b = mnp.amax(x, axis=-3)
    c = mnp.amax(x, keepdims=True)
    d = mnp.amax(x, initial=3)
    e = mnp.amax(x, axis=(0, 1), keepdims=True)
    f = mnp.amax(x, initial=4, where=mask)
    g = mnp.amax(x, initial=5, where=mask, keepdims=True)
    h = mnp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
    return a, b, c, d, e, f, g, h

def onp_amax(x, mask):
    a = onp.amax(x)
    b = onp.amax(x, axis=-3)
    c = onp.amax(x, keepdims=True)
    d = onp.amax(x, initial=3)
    e = onp.amax(x, axis=(0, 1), keepdims=True)
    f = onp.amax(x, initial=4, where=mask)
    g = onp.amax(x, initial=5, where=mask, keepdims=True)
    h = onp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
    return a, b, c, d, e, f, g, h

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amax():
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amax, onp_amax, (a, mask))

def mnp_amin(x, mask):
    a = mnp.amin(x)
    b = mnp.amin(x, axis=-3)
    c = mnp.amin(x, keepdims=True)
    d = mnp.amin(x, initial=-1)
    e = mnp.amin(x, axis=(0, 1), keepdims=True)
    f = mnp.amin(x, initial=-2)
    g = mnp.amin(x, initial=-3, keepdims=True)
    h = mnp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
    return a, b, c, d, e, f, g, h

def onp_amin(x, mask):
    a = onp.amin(x)
    b = onp.amin(x, axis=-3)
    c = onp.amin(x, keepdims=True)
    d = onp.amin(x, initial=-1)
    e = onp.amin(x, axis=(0, 1), keepdims=True)
    f = onp.amin(x, initial=-2)
    g = onp.amin(x, initial=-3, keepdims=True)
    h = onp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
    return a, b, c, d, e, f, g, h

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amin():
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amin, onp_amin, (a, mask))

def mnp_hypot(x1, x2):
    return mnp.hypot(x1, x2)

def onp_hypot(x1, x2):
    return onp.hypot(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_hypot():
    run_binop_test(mnp_hypot, onp_hypot, test_case)

def mnp_heaviside(x1, x2):
    return mnp.heaviside(x1, x2)

def onp_heaviside(x1, x2):
    return onp.heaviside(x1, x2)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_heaviside():
    broadcastables = test_case.broadcastables
    for b1 in broadcastables:
        for b2 in broadcastables:
            b = onp.subtract(b1, b2)
            match_res(mnp_heaviside, onp_heaviside, b, b1)
            match_res(mnp_heaviside, onp_heaviside, b, b2)

def mnp_floor(x):
    return mnp.floor(x)

def onp_floor(x):
    return onp.floor(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor():
    run_unary_test(mnp_floor, onp_floor, test_case)
    x = rand_int(2, 3) * onp.random.rand(2, 3)
    match_res(mnp_floor, onp_floor, x)
    match_res(mnp_floor, onp_floor, -x)

def mnp_floor_divide(x, y):
    return mnp.floor_divide(x, y)
def onp_floor_divide(x, y):
    return onp.floor_divide(x, y)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor_divide():
    run_binop_test(mnp_floor_divide, onp_floor_divide, test_case)
def mnp_remainder(x, y):
    return mnp.remainder(x, y)

def onp_remainder(x, y):
    return onp.remainder(x, y)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_remainder():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_remainder, onp_remainder, x, y)

def mnp_mod(x, y):
    return mnp.mod(x, y)

def onp_mod(x, y):
    return onp.mod(x, y)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_mod():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_mod, onp_mod, x, y)

def mnp_fmod(x, y):
    return mnp.fmod(x, y)

def onp_fmod(x, y):
    return onp.fmod(x, y)

@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fmod():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_fmod, onp_fmod, x, y)

def mnp_fix(x):
    return mnp.fix(x)

def onp_fix(x):
    return onp.fix(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fix():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    floats = onp.divide(onp.subtract(x, y), y)
    match_res(mnp_fix, onp_fix, floats, error=1e-5)

def mnp_trunc(x):
    return mnp.trunc(x)

def onp_trunc(x):
    return onp.trunc(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_trunc():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    floats = onp.divide(onp.subtract(x, y), y)
    match_res(mnp_trunc, onp_trunc, floats, error=1e-5)

def mnp_exp(x):
    return mnp.exp(x)

def onp_exp(x):
    return onp.exp(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exp():
    run_unary_test(mnp_exp, onp_exp, test_case, error=5)

def mnp_expm1(x):
    return mnp.expm1(x)

def onp_expm1(x):
    return onp.expm1(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_expm1():
    run_unary_test(mnp_expm1, onp_expm1, test_case, error=5)

def mnp_exp2(x):
    return mnp.exp2(x)

def onp_exp2(x):
    return onp.exp2(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exp2():
    run_unary_test(mnp_exp2, onp_exp2, test_case, error=5)

def mnp_kron(x, y):
    return mnp.kron(x, y)

def onp_kron(x, y):
    return onp.kron(x, y)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_kron():
    run_binop_test(mnp_kron, onp_kron, test_case)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cross():
    x = onp.arange(8).reshape(2, 2, 1, 2)
    y = onp.arange(4).reshape(1, 2, 2)
    match_res(mnp.cross, onp.cross, x, y)
    match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2)
    match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2, axis=1)
    x = onp.arange(18).reshape(2, 3, 1, 3)
    y = onp.arange(9).reshape(1, 3, 3)
    match_res(mnp.cross, onp.cross, x, y)
    match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2)
    match_res(mnp.cross, onp.cross, x, y, axisa=-3, axisb=1, axisc=2, axis=1)

def mnp_ceil(x):
    return mnp.ceil(x)

def onp_ceil(x):
    return onp.ceil(x)

@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ceil():
    run_unary_test(mnp_ceil, onp_ceil, test_case)
def mnp_positive(x):
    return mnp.positive(x)

def onp_positive(x):
    return onp.positive(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_positive():
    arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
    onp_pos = onp_positive(arr)
    mnp_pos = mnp_positive(to_tensor(arr))
    match_array(mnp_pos.asnumpy(), onp_pos)

def mnp_negative(x):
    return mnp.negative(x)

def onp_negative(x):
    return onp.negative(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_negative():
    arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
    onp_neg = onp_negative(arr)
    mnp_neg = mnp_negative(to_tensor(arr))
    match_array(mnp_neg.asnumpy(), onp_neg, 1e-5)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cumsum():
    x = mnp.ones((16, 16), dtype="bool")
    match_array(mnp.cumsum(x).asnumpy(), onp.cumsum(x.asnumpy()))
    match_array(mnp.cumsum(x, axis=0).asnumpy(),
                onp.cumsum(x.asnumpy(), axis=0))
    match_meta(mnp.cumsum(x).asnumpy(), onp.cumsum(x.asnumpy()))

    x = rand_int(3, 4, 5)
    match_array(mnp.cumsum(to_tensor(x), dtype="bool").asnumpy(),
                onp.cumsum(x, dtype="bool"))
    match_array(mnp.cumsum(to_tensor(x), axis=-1).asnumpy(),
                onp.cumsum(x, axis=-1))

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_promote_types():
    assert mnp.promote_types(mnp.int32, mnp.bool_) == mnp.int32
    assert mnp.promote_types(int, mnp.bool_) == mnp.int32
    assert mnp.promote_types("float32", mnp.int64) == mnp.float32
    assert mnp.promote_types(mnp.int64, mnp.float16) == mnp.float16
    assert mnp.promote_types(int, float) == mnp.float32

def mnp_diff(input_tensor):
    a = mnp.diff(input_tensor, 2, append=3.0)
    b = mnp.diff(input_tensor, 4, prepend=6, axis=-2)
    c = mnp.diff(input_tensor, 0, append=3.0, axis=-1)
    d = mnp.diff(input_tensor, 10, prepend=6)
    e = mnp.diff(input_tensor, 1, prepend=input_tensor)
    f = mnp.ediff1d(input_tensor, to_end=input_tensor)
    g = mnp.ediff1d(input_tensor)
    h = mnp.ediff1d(input_tensor, to_begin=3)
    return a, b, c, d, e, f, g, h

def onp_diff(input_array):
    a = onp.diff(input_array, 2, append=3.0)
    b = onp.diff(input_array, 4, prepend=6, axis=-2)
    c = onp.diff(input_array, 0, append=3.0, axis=-1)
    d = onp.diff(input_array, 10, prepend=6)
    e = onp.diff(input_array, 1, prepend=input_array)
    f = onp.ediff1d(input_array, to_end=input_array)
    g = onp.ediff1d(input_array)
    h = onp.ediff1d(input_array, to_begin=3)
    return a, b, c, d, e, f, g, h

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_diff():
    arr = rand_int(3, 4, 5)
    match_res(mnp_diff, onp_diff, arr)
    arr = rand_int(1, 4, 6, 3)
    match_res(mnp_diff, onp_diff, arr)
def mnp_sin(x):
    return mnp.sin(x)

def onp_sin(x):
    return onp.sin(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sin():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_sin(arr)
    actual = mnp_sin(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)

def mnp_cos(x):
    return mnp.cos(x)

def onp_cos(x):
    return onp.cos(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cos():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_cos(arr)
    actual = mnp_cos(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)

def mnp_tan(x):
    return mnp.tan(x)

def onp_tan(x):
    return onp.tan(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tan():
    arr = onp.array([-0.75, -0.5, 0, 0.5, 0.75]).astype('float32')
    expect = onp_tan(arr)
    actual = mnp_tan(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)

def mnp_arcsin(x):
    return mnp.arcsin(x)

def onp_arcsin(x):
    return onp.arcsin(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arcsin():
    arr = onp.random.uniform(-1, 1, 12).astype('float32')
    onp_asin = onp_arcsin(arr)
    mnp_asin = mnp_arcsin(to_tensor(arr))
    match_array(mnp_asin.asnumpy(), onp_asin, error=5)

def mnp_arccos(x):
    return mnp.arccos(x)

def onp_arccos(x):
    return onp.arccos(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arccos():
    arr = onp.random.uniform(-1, 1, 12).astype('float32')
    onp_acos = onp_arccos(arr)
    mnp_acos = mnp_arccos(to_tensor(arr))
    match_array(mnp_acos.asnumpy(), onp_acos, error=5)

def mnp_arctan(x):
    return mnp.arctan(x)

def onp_arctan(x):
    return onp.arctan(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arctan():
    arr = onp.random.uniform(-1, 1, 12).astype('float32')
    onp_atan = onp_arctan(arr)
    mnp_atan = mnp_arctan(to_tensor(arr))
    match_array(mnp_atan.asnumpy(), onp_atan, error=5)

def mnp_sinh(x):
    return mnp.sinh(x)

def onp_sinh(x):
    return onp.sinh(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sinh():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_sinh(arr)
    actual = mnp_sinh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)

def mnp_cosh(x):
    return mnp.cosh(x)

def onp_cosh(x):
    return onp.cosh(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cosh():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_cosh(arr)
    actual = mnp_cosh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)

def mnp_tanh(x):
    return mnp.tanh(x)

def onp_tanh(x):
    return onp.tanh(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tanh():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_tanh(arr)
    actual = mnp_tanh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)

def mnp_arcsinh(x):
    return mnp.arcsinh(x)

def onp_arcsinh(x):
    return onp.arcsinh(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arcsinh():
    arr = onp.random.rand(2, 3, 4).astype('float32')
    expect = onp_arcsinh(arr)
    actual = mnp_arcsinh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)

def mnp_arccosh(x):
    return mnp.arccosh(x)

def onp_arccosh(x):
    return onp.arccosh(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arccosh():
    arr = onp.random.randint(1, 100, size=(2, 3)).astype('float32')
    expect = onp_arccosh(arr)
    actual = mnp_arccosh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)

def mnp_arctanh(x):
    return mnp.arctanh(x)

def onp_arctanh(x):
    return onp.arctanh(x)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arctanh():
    arr = onp.random.uniform(-0.9, 1, 10).astype('float32')
    expect = onp_arctanh(arr)
    actual = mnp_arctanh(to_tensor(arr))
    match_array(actual.asnumpy(), expect, error=5)

def mnp_arctan2(x, y):
    return mnp.arctan2(x, y)

def onp_arctan2(x, y):
    return onp.arctan2(x, y)

@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_arctan2():
    run_binop_test(mnp_arctan2, onp_arctan2, test_case)
  1264. def mnp_convolve(mode):
  1265. a = mnp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
  1266. b = mnp.convolve([1, 2, 3, 4, 5], [2, 3], mode=mode)
  1267. c = mnp.convolve([1, 2], [2, 5, 10], mode=mode)
  1268. d = mnp.convolve(mnp.array([1, 2, 3, 4, 5]), mnp.array([1, 2, 3, 4, 5]), mode=mode)
  1269. e = mnp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
  1270. return a, b, c, d, e
  1271. def onp_convolve(mode):
  1272. a = onp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
  1273. b = onp.convolve([1, 2, 3, 4, 5], [2, 3], mode=mode)
  1274. c = onp.convolve([1, 2], [2, 5, 10], mode=mode)
  1275. d = onp.convolve(onp.array([1, 2, 3, 4, 5]), onp.array([1, 2, 3, 4, 5]), mode=mode)
  1276. e = onp.convolve([1, 2, 3, 4, 5], 2, mode=mode)
  1277. return a, b, c, d, e
  1278. @pytest.mark.level1
  1279. @pytest.mark.platform_arm_ascend_training
  1280. @pytest.mark.platform_x86_ascend_training
  1281. @pytest.mark.platform_x86_gpu_training
  1282. @pytest.mark.platform_x86_cpu
  1283. @pytest.mark.env_onecard
  1284. def test_convolve():
  1285. for mode in ['full', 'same', 'valid']:
  1286. mnp_res = mnp_convolve(mode)
  1287. onp_res = onp_convolve(mode)
  1288. match_all_arrays(mnp_res, onp_res)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_cov():
    x = onp.random.random((3, 4)).tolist()
    mnp_res = mnp.cov(x)
    onp_res = onp.cov(x)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x[0])
    onp_res = onp.cov(x[0])
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    w1 = [0, 1, 2, 3]
    w2 = [4, 5, 6, 7]
    mnp_res = mnp.cov(x, fweights=w1)
    onp_res = onp.cov(x, fweights=w1)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, aweights=w2)
    onp_res = onp.cov(x, aweights=w2)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, fweights=w1, aweights=w2)
    onp_res = onp.cov(x, fweights=w1, aweights=w2)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, fweights=w1, aweights=w2, ddof=3)
    onp_res = onp.cov(x, fweights=w1, aweights=w2, ddof=3)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, fweights=w1, aweights=w2, bias=True)
    onp_res = onp.cov(x, fweights=w1, aweights=w2, bias=True)
    match_all_arrays(mnp_res, onp_res, error=1e-5)
    mnp_res = mnp.cov(x, fweights=w1[0:3], aweights=w2[0:3], rowvar=False, bias=True)
    onp_res = onp.cov(x, fweights=w1[0:3], aweights=w2[0:3], rowvar=False, bias=True)
    match_all_arrays(mnp_res, onp_res, error=1e-5)


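# test_trapz checks trapezoidal integration with the default spacing, explicit
# sample points x, a scalar dx, and both positive and negative axis values.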
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_trapz():
    y = rand_int(2, 3, 4, 5)
    match_res(mnp.trapz, onp.trapz, y)
    match_res(mnp.trapz, onp.trapz, y, x=[-5, -3, 0, 7, 10])
    match_res(mnp.trapz, onp.trapz, y, dx=2, axis=3)
    match_res(mnp.trapz, onp.trapz, y, x=[1, 5, 6, 9], dx=3, axis=-2)


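# gcd and lcm are exercised on overlapping ranges that include negative values
# and zero; NumPy defines both in terms of the absolute values of the inputs,
# so the results should match element-wise.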
def mnp_gcd(x, y):
    return mnp.gcd(x, y)


def onp_gcd(x, y):
    return onp.gcd(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_gcd():
    x = onp.arange(-12, 12).reshape(2, 3, 4)
    y = onp.arange(24).reshape(2, 3, 4)
    match_res(mnp_gcd, onp_gcd, x, y)


def mnp_lcm(x, y):
    return mnp.lcm(x, y)


def onp_lcm(x, y):
    return onp.lcm(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_lcm():
    x = onp.arange(-12, 12).reshape(2, 3, 4)
    y = onp.arange(24).reshape(2, 3, 4)
    match_res(mnp_lcm, onp_lcm, x, y)


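# The nansum/nanmean tests below plant NaN values at fixed positions in a
# random array and compare reductions over scalar, negative, and tuple axes,
# with and without keepdims.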
def mnp_nansum(x):
    a = mnp.nansum(x)
    b = mnp.nansum(x, keepdims=True)
    c = mnp.nansum(x, axis=-2)
    d = mnp.nansum(x, axis=0, keepdims=True)
    e = mnp.nansum(x, axis=(-2, 3))
    f = mnp.nansum(x, axis=(-3, -1), keepdims=True)
    return a, b, c, d, e, f


def onp_nansum(x):
    a = onp.nansum(x)
    b = onp.nansum(x, keepdims=True)
    c = onp.nansum(x, axis=-2)
    d = onp.nansum(x, axis=0, keepdims=True)
    e = onp.nansum(x, axis=(-2, 3))
    f = onp.nansum(x, axis=(-3, -1), keepdims=True)
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nansum():
    x = rand_int(2, 3, 4, 5)
    x[0][2][1][3] = onp.nan
    x[1][0][2][4] = onp.nan
    x[1][1][1][1] = onp.nan
    run_multi_test(mnp_nansum, onp_nansum, (x,))


def mnp_nanmean(x):
    a = mnp.nanmean(x)
    b = mnp.nanmean(x, keepdims=True)
    c = mnp.nanmean(x, axis=-2)
    d = mnp.nanmean(x, axis=0, keepdims=True)
    e = mnp.nanmean(x, axis=(-2, 3))
    f = mnp.nanmean(x, axis=(-3, -1), keepdims=True)
    return a, b, c, d, e, f


def onp_nanmean(x):
    a = onp.nanmean(x)
    b = onp.nanmean(x, keepdims=True)
    c = onp.nanmean(x, axis=-2)
    d = onp.nanmean(x, axis=0, keepdims=True)
    e = onp.nanmean(x, axis=(-2, 3))
    f = onp.nanmean(x, axis=(-3, -1), keepdims=True)
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_nanmean():
    x = rand_int(2, 3, 4, 5)
    x[0][2][1][3] = onp.nan
    x[1][0][2][4] = onp.nan
    x[1][1][1][1] = onp.nan
    run_multi_test(mnp_nanmean, onp_nanmean, (x,))


def mnp_mean(*arrs):
    arr1 = arrs[0]
    arr2 = arrs[1]
    arr3 = arrs[2]
    a = mnp.mean(arr1)
    b = mnp.mean(arr2, keepdims=True)
    c = mnp.mean(arr3, keepdims=False)
    d = mnp.mean(arr2, axis=0, keepdims=True)
    e = mnp.mean(arr3, axis=(0, -1))
    f = mnp.mean(arr3, axis=-1, keepdims=True)
    return a, b, c, d, e, f


def onp_mean(*arrs):
    arr1 = arrs[0]
    arr2 = arrs[1]
    arr3 = arrs[2]
    a = onp.mean(arr1)
    b = onp.mean(arr2, keepdims=True)
    c = onp.mean(arr3, keepdims=False)
    d = onp.mean(arr2, axis=0, keepdims=True)
    e = onp.mean(arr3, axis=(0, -1))
    f = onp.mean(arr3, axis=-1, keepdims=True)
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_mean():
    run_multi_test(mnp_mean, onp_mean, test_case.arrs, error=3)
    run_multi_test(mnp_mean, onp_mean, test_case.expanded_arrs, error=3)
    run_multi_test(mnp_mean, onp_mean, test_case.scalars, error=3)
    run_multi_test(mnp_mean, onp_mean, test_case.empty_arrs, error=3)


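# The remaining tests check error handling: inner and add are expected to
# raise ValueError for incompatible input shapes, and mean for an invalid
# axis argument.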
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_inner():
    with pytest.raises(ValueError):
        mnp.inner(to_tensor(test_case.arrs[0]),
                  to_tensor(test_case.arrs[1]))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_add():
    with pytest.raises(ValueError):
        mnp.add(to_tensor(test_case.arrs[1]), to_tensor(test_case.arrs[2]))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_mean():
    with pytest.raises(ValueError):
        mnp.mean(to_tensor(test_case.arrs[0]), (-1, 0))