test_math_ops.py

# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""unit tests for numpy math operations"""

import pytest
import numpy as onp

import mindspore.numpy as mnp

from .utils import rand_int, rand_bool, run_binop_test, run_unary_test, run_multi_test, \
    run_single_test, match_res, match_array


class Cases():
    def __init__(self):
        self.arrs = [
            rand_int(2),
            rand_int(2, 3),
            rand_int(2, 3, 4),
        ]

        # scalars expanded across the 0th dimension
        self.scalars = [
            rand_int(),
            rand_int(1),
            rand_int(1, 1),
        ]

        # empty arrays
        self.empty_arrs = [
            rand_int(0),
            rand_int(4, 0),
            rand_int(2, 0, 2),
        ]

        # arrays of the same size expanded across the 0th dimension
        self.expanded_arrs = [
            rand_int(2, 3),
            rand_int(1, 2, 3),
            rand_int(1, 1, 2, 3),
        ]

        # arrays with last dimension aligned
        self.aligned_arrs = [
            rand_int(2, 3),
            rand_int(1, 4, 3),
            rand_int(5, 1, 2, 3),
            rand_int(4, 2, 1, 1, 3),
        ]

        # arrays which can be broadcast
        self.broadcastables = [
            rand_int(5),
            rand_int(6, 1),
            rand_int(7, 1, 5),
        ]

        # boolean arrays which can be broadcast
        self.bool_broadcastables = [
            rand_bool(),
            rand_bool(1),
            rand_bool(5),
            rand_bool(6, 1),
            rand_bool(7, 1, 5),
            rand_bool(8, 1, 6, 1),
        ]

        # core dimension 0 is matched for each
        # pair of array[i] and array[i + 1]
        self.core_broadcastables = [
            rand_int(3),
            rand_int(3),
            rand_int(6),
            rand_int(6, 4),
            rand_int(5, 2),
            rand_int(2),
            rand_int(2, 9),
            rand_int(9, 8),
            rand_int(6),
            rand_int(2, 6, 5),
            rand_int(9, 2, 7),
            rand_int(7),
            rand_int(5, 2, 4),
            rand_int(6, 1, 4, 9),
            rand_int(7, 1, 5, 3, 2),
            rand_int(8, 1, 6, 1, 2, 9),
        ]

        # arrays with dimensions of size 1
        self.nested_arrs = [
            rand_int(1),
            rand_int(1, 2),
            rand_int(3, 1, 8),
            rand_int(1, 3, 9, 1),
        ]


test_case = Cases()
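
# Test pattern used throughout this file: every mnp_* wrapper mirrors an onp_*
# wrapper with identical arguments. The helpers imported from .utils are assumed
# to evaluate both wrappers on the same random inputs and compare the outputs:
# run_binop_test/run_unary_test iterate over the array groups defined in Cases,
# run_single_test/run_multi_test handle wrappers that return tuples of results,
# and match_res/match_array compare one pair of results within an optional
# error tolerance.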


def mnp_add(x1, x2):
    return mnp.add(x1, x2)


def onp_add(x1, x2):
    return onp.add(x1, x2)


def mnp_subtract(x1, x2):
    return mnp.subtract(x1, x2)


def onp_subtract(x1, x2):
    return onp.subtract(x1, x2)


def mnp_multiply(x1, x2):
    return mnp.multiply(x1, x2)


def onp_multiply(x1, x2):
    return onp.multiply(x1, x2)


def mnp_divide(x1, x2):
    return mnp.divide(x1, x2)


def onp_divide(x1, x2):
    return onp.divide(x1, x2)


def mnp_true_divide(x1, x2):
    return mnp.true_divide(x1, x2)


def onp_true_divide(x1, x2):
    return onp.true_divide(x1, x2)


def mnp_power(x1, x2):
    return mnp.power(x1, x2)


def onp_power(x1, x2):
    return onp.power(x1, x2)


def mnp_float_power(x1, x2):
    return mnp.float_power(x1, x2)


def onp_float_power(x1, x2):
    return onp.float_power(x1, x2)


def mnp_minimum(a, b):
    return mnp.minimum(a, b)


def onp_minimum(a, b):
    return onp.minimum(a, b)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add():
    run_binop_test(mnp_add, onp_add, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_subtract():
    run_binop_test(mnp_subtract, onp_subtract, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multiply():
    run_binop_test(mnp_multiply, onp_multiply, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_divide():
    run_binop_test(mnp_divide, onp_divide, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_true_divide():
    run_binop_test(mnp_true_divide, onp_true_divide, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_power():
    run_binop_test(mnp_power, onp_power, test_case, error=1e-5)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_float_power():
    run_binop_test(mnp_float_power, onp_float_power, test_case, error=1e-5)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_minimum():
    run_binop_test(mnp_minimum, onp_minimum, test_case)


def mnp_add_kwargs(x, y, where=None, out=None):
    return mnp.add(x, y, where=where, out=out)


def onp_add_kwargs(x, y, where=None, out=None):
    return onp.add(x, y, where=where, out=out)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_kwargs():
    for where in test_case.bool_broadcastables[:2]:
        for x in test_case.broadcastables[:2]:
            for y in test_case.broadcastables[:2]:
                shape_out = onp.broadcast(where, x, y).shape
                out = rand_int(*shape_out)
                match_res(mnp_add_kwargs, onp_add_kwargs, x, y, where, out)
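
# In NumPy, `out` pre-allocates the result buffer and `where` masks the
# element-wise update: positions where the mask is False keep the value already
# stored in `out`. The broadcast shape of (where, x, y) is computed above so the
# supplied `out` has the right shape, and mindspore.numpy is expected to mirror
# this behaviour, which is what match_res checks.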


def mnp_tensordot(x, y):
    a = mnp.tensordot(x, y)
    b = mnp.tensordot(x, y, axes=0)
    c = mnp.tensordot(x, y, axes=1)
    d = mnp.tensordot(x, y, axes=2)
    e = mnp.tensordot(x, y, axes=(3, 0))
    f = mnp.tensordot(x, y, axes=[2, 1])
    g = mnp.tensordot(x, y, axes=((2, 3), (0, 1)))
    h = mnp.tensordot(x, y, axes=[[3, 2], [1, 0]])
    return a, b, c, d, e, f, g, h


def onp_tensordot(x, y):
    a = onp.tensordot(x, y)
    b = onp.tensordot(x, y, axes=0)
    c = onp.tensordot(x, y, axes=1)
    d = onp.tensordot(x, y, axes=2)
    e = onp.tensordot(x, y, axes=(3, 0))
    f = onp.tensordot(x, y, axes=[2, 1])
    g = onp.tensordot(x, y, axes=((2, 3), (0, 1)))
    h = onp.tensordot(x, y, axes=[[3, 2], [1, 0]])
    return a, b, c, d, e, f, g, h


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensordot():
    x = rand_int(4, 2, 7, 7)
    y = rand_int(7, 7, 6)
    run_multi_test(mnp_tensordot, onp_tensordot, (x, y))


def mnp_std(x):
    a = mnp.std(x)
    b = mnp.std(x, axis=None)
    c = mnp.std(x, axis=0)
    d = mnp.std(x, axis=1)
    e = mnp.std(x, axis=(-1, 1))
    f = mnp.std(x, axis=(0, 1, 2))
    g = mnp.std(x, axis=None, ddof=1, keepdims=True)
    h = mnp.std(x, axis=0, ddof=1, keepdims=True)
    i = mnp.std(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i


def onp_std(x):
    a = onp.std(x)
    b = onp.std(x, axis=None)
    c = onp.std(x, axis=0)
    d = onp.std(x, axis=1)
    e = onp.std(x, axis=(-1, 1))
    f = onp.std(x, axis=(0, 1, 2))
    g = onp.std(x, axis=None, ddof=1, keepdims=True)
    h = onp.std(x, axis=0, ddof=1, keepdims=True)
    i = onp.std(x, axis=(2), ddof=1, keepdims=True)
    return a, b, c, d, e, f, g, h, i


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_std():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_std, onp_std, arr1, error=1e-5)
    run_single_test(mnp_std, onp_std, arr2, error=1e-5)


def mnp_var(x):
    a = mnp.var(x)
    b = mnp.var(x, axis=0)
    c = mnp.var(x, axis=(0))
    d = mnp.var(x, axis=(0, 1, 2))
    e = mnp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e


def onp_var(x):
    a = onp.var(x)
    b = onp.var(x, axis=0)
    c = onp.var(x, axis=(0))
    d = onp.var(x, axis=(0, 1, 2))
    e = onp.var(x, axis=(-1, 1, 2), ddof=1, keepdims=True)
    return a, b, c, d, e


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_var():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 4, 3, 3)
    run_single_test(mnp_var, onp_var, arr1, error=1e-5)
    run_single_test(mnp_var, onp_var, arr2, error=1e-5)


def mnp_average(x):
    a = mnp.average(x)
    b = mnp.average(x, axis=None)
    c = mnp.average(x, axis=0)
    d = mnp.average(x, axis=1)
    e = mnp.average(x, axis=(-2, 1))
    f = mnp.average(x, axis=(0, 1, 2, 3))
    g = mnp.average(x, axis=None, weights=x)
    h = mnp.average(x, axis=0, weights=x)
    i = mnp.average(x, axis=(1, 2, 3), weights=x)
    return a, b, c, d, e, f, g, h, i


def onp_average(x):
    a = onp.average(x)
    b = onp.average(x, axis=None)
    c = onp.average(x, axis=0)
    d = onp.average(x, axis=1)
    e = onp.average(x, axis=(-2, 1))
    f = onp.average(x, axis=(0, 1, 2, 3))
    g = onp.average(x, axis=None, weights=x)
    h = onp.average(x, axis=0, weights=x)
    i = onp.average(x, axis=(1, 2, 3), weights=x)
    return a, b, c, d, e, f, g, h, i


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_average():
    arr1 = rand_int(2, 3, 4, 5)
    arr2 = rand_int(4, 5, 1, 3, 1)
    run_single_test(mnp_average, onp_average, arr1, error=1e-5)
    run_single_test(mnp_average, onp_average, arr2, error=1e-5)


def mnp_count_nonzero(x):
    a = mnp.count_nonzero(x)
    b = mnp.count_nonzero(x, axis=None)
    c = mnp.count_nonzero(x, axis=0)
    d = mnp.count_nonzero(x, axis=1)
    e = mnp.count_nonzero(x, axis=(-2, 1))
    f = mnp.count_nonzero(x, axis=(0, 1, 2, 3))
    return a, b, c, d, e, f


def onp_count_nonzero(x):
    a = onp.count_nonzero(x)
    b = onp.count_nonzero(x, axis=None)
    c = onp.count_nonzero(x, axis=0)
    d = onp.count_nonzero(x, axis=1)
    e = onp.count_nonzero(x, axis=(-2, 1))
    f = onp.count_nonzero(x, axis=(0, 1, 2, 3))
    return a, b, c, d, e, f


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_count_nonzero():
    # minus 5 to make some values below zero
    arr1 = rand_int(2, 3, 4, 5) - 5
    arr2 = rand_int(4, 5, 4, 3, 3) - 5
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr1)
    run_single_test(mnp_count_nonzero, onp_count_nonzero, arr2)


def mnp_inner(a, b):
    return mnp.inner(a, b)


def onp_inner(a, b):
    return onp.inner(a, b)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_inner():
    for arr1 in test_case.aligned_arrs:
        for arr2 in test_case.aligned_arrs:
            match_res(mnp_inner, onp_inner, arr1, arr2)

    for scalar1 in test_case.scalars:
        for scalar2 in test_case.scalars:
            match_res(mnp_inner, onp_inner,
                      scalar1, scalar2)


def mnp_dot(a, b):
    return mnp.dot(a, b)


def onp_dot(a, b):
    return onp.dot(a, b)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dot():
    # test case (1D, 1D)
    match_res(mnp_dot, onp_dot, rand_int(3), rand_int(3))

    # test case (2D, 2D)
    match_res(mnp_dot, onp_dot, rand_int(4, 7), rand_int(7, 2))

    # test case (0D, _) (_, 0D)
    match_res(mnp_dot, onp_dot, rand_int(), rand_int(1, 9, 3))
    match_res(mnp_dot, onp_dot, rand_int(8, 5, 6, 3), rand_int())

    # test case (ND, 1D)
    match_res(mnp_dot, onp_dot, rand_int(2, 4, 5), rand_int(5))

    # test case (ND, MD)
    match_res(mnp_dot, onp_dot, rand_int(5, 4, 1, 8), rand_int(8, 3))

    for i in range(8):
        match_res(mnp_dot, onp_dot,
                  test_case.core_broadcastables[2*i], test_case.core_broadcastables[2*i + 1])


def mnp_outer(a, b):
    return mnp.outer(a, b)


def onp_outer(a, b):
    return onp.outer(a, b)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_outer():
    run_binop_test(mnp_outer, onp_outer, test_case)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_type_promotion():
    arr = rand_int(2, 3)
    onp_sum = onp_add(arr, arr)

    a = mnp.asarray(arr, dtype='float16')
    b = mnp.asarray(arr, dtype='float32')
    c = mnp.asarray(arr, dtype='int32')

    match_array(mnp_add(a, b).asnumpy(), onp_sum)
    match_array(mnp_add(b, c).asnumpy(), onp_sum)
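
# Type promotion note: the expected result is computed once with NumPy on the
# raw array, and assuming rand_int produces small integers (as its name
# suggests) those values are exactly representable in float16, float32 and
# int32. mnp.add on the mixed-dtype pairs is therefore expected to reproduce
# the same values after promoting to the wider dtype, which match_array checks.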


def mnp_absolute(x):
    return mnp.absolute(x)


def onp_absolute(x):
    return onp.absolute(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_absolute():
    arr = rand_int(2, 3)

    a = mnp.asarray(arr, dtype='float16')
    b = mnp.asarray(arr, dtype='float32')
    c = mnp.asarray(arr, dtype='uint8')
    d = mnp.asarray(arr, dtype='bool')

    match_array(mnp_absolute(a).asnumpy(), onp_absolute(a.asnumpy()))
    match_array(mnp_absolute(b).asnumpy(), onp_absolute(b.asnumpy()))
    match_array(mnp_absolute(c).asnumpy(), onp_absolute(c.asnumpy()))
    match_array(mnp_absolute(d).asnumpy(), onp_absolute(d.asnumpy()))

    where = rand_int(2, 3).astype('bool')
    out = rand_int(2, 3)
    match_array(mnp.absolute(a, out=mnp.asarray(out), where=mnp.asarray(where)).asnumpy(),
                onp.absolute(a.asnumpy(), out=out, where=where))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_deg2rad_rad2deg():
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp.deg2rad, onp.deg2rad, arr)
        match_res(mnp.rad2deg, onp.rad2deg, arr)


def mnp_ptp(x):
    a = mnp.ptp(x)
    b = mnp.ptp(x, keepdims=True)
    c = mnp.ptp(x, axis=(0, 1))
    d = mnp.ptp(x, axis=-1)
    return a, b, c, d


def onp_ptp(x):
    a = onp.ptp(x)
    b = onp.ptp(x, keepdims=True)
    c = onp.ptp(x, axis=(0, 1))
    d = onp.ptp(x, axis=-1)
    return a, b, c, d


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_ptp():
    arrs = [rand_int(2, 3), rand_int(1, 2, 4), rand_int(2, 4)]
    for arr in arrs:
        match_res(mnp_ptp, onp_ptp, arr)


def mnp_add_dtype(x1, x2, out, where):
    a = mnp.add(x1, x2, dtype=mnp.float16)
    b = mnp.add(x1, x2, out=out, dtype=mnp.float16)
    c = mnp.add(x1, x2, where=where, dtype=mnp.float16)
    d = mnp.add(x1, x2, out=out, where=where, dtype=mnp.float16)
    return a, b, c, d


def onp_add_dtype(x1, x2, out, where):
    a = onp.add(x1, x2, dtype=onp.float16)
    b = onp.add(x1, x2, out=out, dtype=onp.float16)
    c = onp.add(x1, x2, where=where, dtype=onp.float16)
    d = onp.add(x1, x2, out=out, where=where, dtype=onp.float16)
    return a, b, c, d


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_dtype():
    x1 = rand_int(2, 3).astype('int32')
    x2 = rand_int(2, 3).astype('int32')
    out = rand_int(2, 3).astype('float32')
    where = rand_bool(2, 3)
    arrs = (x1, x2, out, where)
    mnp_arrs = map(mnp.array, arrs)
    mnp_res = mnp_add_dtype(*mnp_arrs)
    onp_res = onp_add_dtype(*arrs)
    for actual, expected in zip(mnp_res, onp_res):
        assert actual.asnumpy().dtype == expected.dtype
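
# Only the result dtypes are compared here: with an explicit dtype plus `out`
# and `where` in play, the untouched elements depend on the initial contents of
# `out`, so the check is limited to verifying that mindspore.numpy resolves the
# output dtype the same way NumPy does for each keyword combination.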


def mnp_matmul(x1, x2):
    return mnp.matmul(x1, x2)


def onp_matmul(x1, x2):
    return onp.matmul(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_matmul():
    for scalar1 in test_case.scalars[1:]:
        for scalar2 in test_case.scalars[1:]:
            match_res(mnp_matmul, onp_matmul,
                      scalar1, scalar2)

    for i in range(8):
        match_res(mnp_matmul, onp_matmul,
                  test_case.core_broadcastables[2*i],
                  test_case.core_broadcastables[2*i + 1])


def mnp_square(x):
    return mnp.square(x)


def onp_square(x):
    return onp.square(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_square():
    run_unary_test(mnp_square, onp_square, test_case)


def mnp_sqrt(x):
    return mnp.sqrt(x)


def onp_sqrt(x):
    return onp.sqrt(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_sqrt():
    run_unary_test(mnp_sqrt, onp_sqrt, test_case)


def mnp_reciprocal(x):
    return mnp.reciprocal(x)


def onp_reciprocal(x):
    return onp.reciprocal(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_reciprocal():
    run_unary_test(mnp_reciprocal, onp_reciprocal, test_case)


def mnp_log(x):
    return mnp.log(x)


def onp_log(x):
    return onp.log(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_log():
    run_unary_test(mnp.log, onp.log, test_case, error=1e-5)


def mnp_maximum(x1, x2):
    return mnp.maximum(x1, x2)


def onp_maximum(x1, x2):
    return onp.maximum(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maximum():
    run_binop_test(mnp_maximum, onp_maximum, test_case)


def mnp_clip(x):
    a = mnp.clip(x, mnp.asarray(10.0), mnp.asarray([2,]))
    b = mnp.clip(x, 0, 1)
    c = mnp.clip(x, mnp.asarray(0), mnp.asarray(10), dtype=mnp.float32)
    return a, b, c


def onp_clip(x):
    a = onp.clip(x, onp.asarray(10.0), onp.asarray([2,]))
    b = onp.clip(x, 0, 1)
    c = onp.clip(x, onp.asarray(0), onp.asarray(10), dtype=onp.float32)
    return a, b, c


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_clip():
    run_unary_test(mnp_clip, onp_clip, test_case)


def mnp_amax(x, mask):
    a = mnp.amax(x)
    b = mnp.amax(x, axis=-3)
    c = mnp.amax(x, keepdims=True)
    d = mnp.amax(x, initial=3)
    e = mnp.amax(x, axis=(0, 1), keepdims=True)
    f = mnp.amax(x, initial=4, where=mask)
    g = mnp.amax(x, initial=5, where=mask, keepdims=True)
    h = mnp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
    return a, b, c, d, e, f, g, h


def onp_amax(x, mask):
    a = onp.amax(x)
    b = onp.amax(x, axis=-3)
    c = onp.amax(x, keepdims=True)
    d = onp.amax(x, initial=3)
    e = onp.amax(x, axis=(0, 1), keepdims=True)
    f = onp.amax(x, initial=4, where=mask)
    g = onp.amax(x, initial=5, where=mask, keepdims=True)
    h = onp.amax(x, axis=(1, 2, 3), initial=6, where=mask)
    return a, b, c, d, e, f, g, h


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amax():
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amax, onp_amax, (a, mask))
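
# `initial` gives the reduction a starting value, which NumPy requires whenever
# `where` could mask out an entire reduction slice (the maximum or minimum of an
# empty set is otherwise undefined). Pairing `initial` with `where` above keeps
# the NumPy reference well defined so the MindSpore results can be compared
# against it; the same applies to the amin cases below.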


def mnp_amin(x, mask):
    a = mnp.amin(x)
    b = mnp.amin(x, axis=-3)
    c = mnp.amin(x, keepdims=True)
    d = mnp.amin(x, initial=-1)
    e = mnp.amin(x, axis=(0, 1), keepdims=True)
    f = mnp.amin(x, initial=-2, where=mask)
    g = mnp.amin(x, initial=-3, where=mask, keepdims=True)
    h = mnp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
    return a, b, c, d, e, f, g, h


def onp_amin(x, mask):
    a = onp.amin(x)
    b = onp.amin(x, axis=-3)
    c = onp.amin(x, keepdims=True)
    d = onp.amin(x, initial=-1)
    e = onp.amin(x, axis=(0, 1), keepdims=True)
    f = onp.amin(x, initial=-2, where=mask)
    g = onp.amin(x, initial=-3, where=mask, keepdims=True)
    h = onp.amin(x, axis=(1, 2, 3), initial=-4, where=mask)
    return a, b, c, d, e, f, g, h


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_amin():
    a = rand_int(2, 3, 4, 5).astype('float32')
    mask = rand_bool(2, 3, 4, 5)
    run_multi_test(mnp_amin, onp_amin, (a, mask))


def mnp_hypot(x1, x2):
    return mnp.hypot(x1, x2)


def onp_hypot(x1, x2):
    return onp.hypot(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_hypot():
    run_binop_test(mnp_hypot, onp_hypot, test_case)


def mnp_heaviside(x1, x2):
    return mnp.heaviside(x1, x2)


def onp_heaviside(x1, x2):
    return onp.heaviside(x1, x2)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_heaviside():
    broadcastables = test_case.broadcastables
    for b1 in broadcastables:
        for b2 in broadcastables:
            b = onp.subtract(b1, b2)
            match_res(mnp_heaviside, onp_heaviside, b, b1)
            match_res(mnp_heaviside, onp_heaviside, b, b2)
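
# heaviside(x1, x2) returns 0 where x1 < 0, x2 where x1 == 0, and 1 where
# x1 > 0. Using b = b1 - b2 as the first argument yields negative, zero and
# positive entries (exact zeros wherever the broadcast values of b1 and b2
# coincide), so all three branches can be exercised against the broadcast
# second argument.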


def mnp_floor(x):
    return mnp.floor(x)


def onp_floor(x):
    return onp.floor(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor():
    run_unary_test(mnp_floor, onp_floor, test_case)
    x = rand_int(2, 3) * onp.random.rand(2, 3)
    match_res(mnp_floor, onp_floor, x)
    match_res(mnp_floor, onp_floor, -x)


def mnp_floor_divide(x, y):
    return mnp.floor_divide(x, y)


def onp_floor_divide(x, y):
    return onp.floor_divide(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_floor_divide():
    run_binop_test(mnp_floor_divide, onp_floor_divide, test_case)


def mnp_remainder(x, y):
    return mnp.remainder(x, y)


def onp_remainder(x, y):
    return onp.remainder(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_remainder():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_remainder, onp_remainder, x, y)


def mnp_mod(x, y):
    return mnp.mod(x, y)


def onp_mod(x, y):
    return onp.mod(x, y)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_mod():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_mod, onp_mod, x, y)


def mnp_fmod(x, y):
    return mnp.fmod(x, y)


def onp_fmod(x, y):
    return onp.fmod(x, y)


@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fmod():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    match_res(mnp_fmod, onp_fmod, x, y)


def mnp_fix(x):
    return mnp.fix(x)


def onp_fix(x):
    return onp.fix(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_fix():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    floats = onp.divide(onp.subtract(x, y), y)
    match_res(mnp_fix, onp_fix, floats, error=1e-5)


def mnp_trunc(x):
    return mnp.trunc(x)


def onp_trunc(x):
    return onp.trunc(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_trunc():
    x = rand_int(2, 3)
    y = rand_int(2, 3)
    floats = onp.divide(onp.subtract(x, y), y)
    match_res(mnp_trunc, onp_trunc, floats, error=1e-5)


def mnp_exp(x):
    return mnp.exp(x)


def onp_exp(x):
    return onp.exp(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exp():
    run_unary_test(mnp_exp, onp_exp, test_case, error=5)


def mnp_expm1(x):
    return mnp.expm1(x)


def onp_expm1(x):
    return onp.expm1(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_expm1():
    run_unary_test(mnp_expm1, onp_expm1, test_case, error=5)


def mnp_positive(x, out, where):
    return mnp.positive(x, out=out, where=where)


def onp_positive(x, out, where):
    return onp.positive(x, out=out, where=where)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_positive():
    arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
    out_lst = [onp.ones((2, 2, 3)).astype('float32'), onp.ones((5, 2, 2, 3)).astype('float32')]
    where_lst = [onp.full((2, 2, 3), [True, False, True]), onp.full((2, 3), False)]
    for out in out_lst:
        for where in where_lst:
            onp_pos = onp_positive(arr, out=out, where=where)
            mnp_pos = mnp_positive(mnp.asarray(arr), mnp.asarray(out), mnp.asarray(where))
            match_array(mnp_pos.asnumpy(), onp_pos)
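
# NumPy accepts an `out` array with any shape that the inputs broadcast to,
# which is why out_lst also contains a (5, 2, 2, 3) buffer for the (2, 2, 3)
# input; positions masked out by `where` keep whatever value was already in
# `out`, and the MindSpore result is expected to reproduce that behaviour here
# and in the negative test below.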


def mnp_negative(x, out, where):
    return mnp.negative(x, out=out, where=where)


def onp_negative(x, out, where):
    return onp.negative(x, out=out, where=where)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_negative():
    arr = onp.arange(-6, 6).reshape((2, 2, 3)).astype('float32')
    out_lst = [onp.ones((2, 2, 3)).astype('float32'), onp.ones((5, 2, 2, 3)).astype('float32')]
    where_lst = [onp.full((2, 2, 3), [True, False, True]), onp.full((2, 3), False)]
    for out in out_lst:
        for where in where_lst:
            onp_neg = onp_negative(arr, out=out, where=where)
            mnp_neg = mnp_negative(mnp.asarray(arr), mnp.asarray(out), mnp.asarray(where))
            match_array(mnp_neg.asnumpy(), onp_neg, 1e-5)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_inner():
    with pytest.raises(ValueError):
        mnp.inner(mnp.asarray(test_case.arrs[0]),
                  mnp.asarray(test_case.arrs[1]))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_add():
    with pytest.raises(ValueError):
        mnp.add(mnp.asarray(test_case.arrs[1]), mnp.asarray(test_case.arrs[2]))


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_mean():
    with pytest.raises(ValueError):
        mnp.mean(mnp.asarray(test_case.arrs[0]), (-1, 0))