You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

test_math_ops.py 16 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567
  1. # Copyright 2020-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """unit tests for numpy math operations"""
  16. from functools import partial
  17. import pytest
  18. import numpy as onp
  19. import mindspore.numpy as mnp
  20. from mindspore import context
  21. def rand_int(*shape):
  22. """return an random integer array with parameter shape"""
  23. res = onp.random.randint(low=1, high=5, size=shape)
  24. if isinstance(res, onp.ndarray):
  25. return res.astype(onp.float32)
  26. return float(res)
  27. # return an random boolean array
  28. def rand_bool(*shape):
  29. return onp.random.rand(*shape) > 0.5
  30. class Cases():
  31. def __init__(self):
  32. self.device_cpu = context.get_context('device_target')
  33. self.arrs = [
  34. rand_int(2),
  35. rand_int(2, 3),
  36. rand_int(2, 3, 4),
  37. rand_int(2, 3, 4, 5),
  38. ]
  39. # scalars expanded across the 0th dimension
  40. self.scalars = [
  41. rand_int(),
  42. rand_int(1),
  43. rand_int(1, 1),
  44. rand_int(1, 1, 1, 1),
  45. ]
  46. # empty arrays
  47. self.empty_arrs = [
  48. rand_int(0),
  49. rand_int(4, 0),
  50. rand_int(2, 0, 2),
  51. rand_int(5, 0, 7, 0),
  52. ]
  53. # arrays of the same size expanded across the 0th dimension
  54. self.expanded_arrs = [
  55. rand_int(2, 3),
  56. rand_int(1, 2, 3),
  57. rand_int(1, 1, 2, 3),
  58. rand_int(1, 1, 1, 2, 3),
  59. ]
  60. # arrays of the same size expanded across the 0th dimension
  61. self.expanded_arrs = [
  62. rand_int(2, 3),
  63. rand_int(1, 2, 3),
  64. rand_int(1, 1, 2, 3),
  65. rand_int(1, 1, 1, 2, 3),
  66. ]
  67. # arrays with last dimension aligned
  68. self.aligned_arrs = [
  69. rand_int(2, 3),
  70. rand_int(1, 4, 3),
  71. rand_int(5, 1, 2, 3),
  72. rand_int(4, 2, 1, 1, 3),
  73. ]
  74. # arrays which can be broadcast
  75. self.broadcastables = [
  76. rand_int(5),
  77. rand_int(6, 1),
  78. rand_int(7, 1, 5),
  79. rand_int(8, 1, 6, 1)
  80. ]
  81. # boolean arrays which can be broadcast
  82. self.bool_broadcastables = [
  83. rand_bool(),
  84. rand_bool(1),
  85. rand_bool(5),
  86. rand_bool(6, 1),
  87. rand_bool(7, 1, 5),
  88. rand_bool(8, 1, 6, 1),
  89. ]
  90. # core dimension 0 is matched for each
  91. # pair of array[i] and array[i + 1]
  92. self.core_broadcastables = [
  93. rand_int(3),
  94. rand_int(3),
  95. rand_int(6),
  96. rand_int(6, 4),
  97. rand_int(5, 2),
  98. rand_int(2),
  99. rand_int(2, 9),
  100. rand_int(9, 8),
  101. rand_int(6),
  102. rand_int(2, 6, 5),
  103. rand_int(9, 2, 7),
  104. rand_int(7),
  105. rand_int(5, 2, 4),
  106. rand_int(6, 1, 4, 9),
  107. rand_int(7, 1, 5, 3, 2),
  108. rand_int(8, 1, 6, 1, 2, 9),
  109. ]
  110. # arrays with dimensions of size 1
  111. self.nested_arrs = [
  112. rand_int(1),
  113. rand_int(1, 2),
  114. rand_int(3, 1, 8),
  115. rand_int(1, 3, 9, 1),
  116. ]
# Shared fixture instance used by every test below, plus the execution
# context: static-graph mode on the CPU backend.
test_case = Cases()
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
  119. def mnp_add(x1, x2):
  120. return mnp.add(x1, x2)
  121. def onp_add(x1, x2):
  122. return onp.add(x1, x2)
  123. def mnp_subtract(x1, x2):
  124. return mnp.subtract(x1, x2)
  125. def onp_subtract(x1, x2):
  126. return onp.subtract(x1, x2)
  127. def mnp_mutiply(x1, x2):
  128. return mnp.multiply(x1, x2)
  129. def onp_multiply(x1, x2):
  130. return onp.multiply(x1, x2)
  131. def mnp_divide(x1, x2):
  132. return mnp.divide(x1, x2)
  133. def onp_divide(x1, x2):
  134. return onp.divide(x1, x2)
  135. def mnp_power(x1, x2):
  136. return mnp.power(x1, x2)
  137. def onp_power(x1, x2):
  138. return onp.power(x1, x2)
  139. def mnp_inner(a, b):
  140. return mnp.inner(a, b)
  141. def onp_inner(a, b):
  142. return onp.inner(a, b)
  143. def mnp_dot(a, b):
  144. return mnp.dot(a, b)
  145. def onp_dot(a, b):
  146. return onp.dot(a, b)
  147. def mnp_outer(a, b):
  148. return mnp.outer(a, b)
  149. def onp_outer(a, b):
  150. return onp.outer(a, b)
# Thin mnp/onp wrapper pairs that forward the ufunc-style `where` / `out`
# keyword arguments, so both libraries can be compared through one path.

def mnp_add_kwargs(x, y, where=None, out=None):
    return mnp.add(x, y, where=where, out=out)


def onp_add_kwargs(x, y, where=None, out=None):
    return onp.add(x, y, where=where, out=out)


def mnp_subtract_kwargs(x, y, where=None, out=None):
    return mnp.subtract(x, y, where=where, out=out)


def onp_subtract_kwargs(x, y, where=None, out=None):
    return onp.subtract(x, y, where=where, out=out)


def mnp_multiply_kwargs(x, y, where=None, out=None):
    return mnp.multiply(x, y, where=where, out=out)


def onp_multiply_kwargs(x, y, where=None, out=None):
    return onp.multiply(x, y, where=where, out=out)


def mnp_divide_kwargs(x, y, where=None, out=None):
    return mnp.divide(x, y, where=where, out=out)


def onp_divide_kwargs(x, y, where=None, out=None):
    return onp.divide(x, y, where=where, out=out)


def mnp_power_kwargs(x, y, where=None, out=None):
    return mnp.power(x, y, where=where, out=out)


def onp_power_kwargs(x, y, where=None, out=None):
    return onp.power(x, y, where=where, out=out)
def mnp_tensordot(x, y):
    """Apply mnp.tensordot under every accepted axes form.

    Covers the default, integer axes (0/1/2), a tuple pair, a list pair,
    and nested sequences selecting two axes per operand.
    """
    a = mnp.tensordot(x, y)
    b = mnp.tensordot(x, y, axes=0)
    c = mnp.tensordot(x, y, axes=1)
    d = mnp.tensordot(x, y, axes=2)
    e = mnp.tensordot(x, y, axes=(3, 0))
    f = mnp.tensordot(x, y, axes=[2, 1])
    g = mnp.tensordot(x, y, axes=((2, 3), (0, 1)))
    h = mnp.tensordot(x, y, axes=[[3, 2], [1, 0]])
    return a, b, c, d, e, f, g, h
  181. def onp_tensordot(x, y):
  182. a = onp.tensordot(x, y)
  183. b = onp.tensordot(x, y, axes=0)
  184. c = onp.tensordot(x, y, axes=1)
  185. d = onp.tensordot(x, y, axes=2)
  186. e = onp.tensordot(x, y, axes=(3, 0))
  187. f = onp.tensordot(x, y, axes=[2, 1])
  188. g = onp.tensordot(x, y, axes=((2, 3), (0, 1)))
  189. h = onp.tensordot(x, y, axes=[[3, 2], [1, 0]])
  190. return a, b, c, d, e, f, g, h
  191. def run_binop_test(mnp_fn, onp_fn):
  192. for arr in test_case.arrs:
  193. match_res(mnp_fn, onp_fn, arr, arr)
  194. for scalar in test_case.scalars:
  195. match_res(mnp_fn, onp_fn, arr, scalar)
  196. match_res(mnp_fn, onp_fn, scalar, arr)
  197. for scalar1 in test_case.scalars:
  198. for scalar2 in test_case.scalars:
  199. match_res(mnp_fn, onp_fn, scalar1, scalar2)
  200. for expanded_arr1 in test_case.expanded_arrs:
  201. for expanded_arr2 in test_case.expanded_arrs:
  202. match_res(mnp_fn, onp_fn, expanded_arr1, expanded_arr2)
  203. for broadcastable1 in test_case.broadcastables:
  204. for broadcastable2 in test_case.broadcastables:
  205. match_res(mnp_fn, onp_fn, broadcastable1, broadcastable2)
  206. def run_multi_test(mnp_fn, onp_fn, arrs):
  207. mnp_arrs = map(mnp.asarray, arrs)
  208. for actual, expected in zip(mnp_fn(*mnp_arrs), onp_fn(*arrs)):
  209. match_array(actual.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add():
    """Check mnp.add against onp.add over the shared binary-op cases."""
    run_binop_test(mnp_add, onp_add)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_subtract():
    """Check mnp.subtract against onp.subtract over the shared binary-op cases."""
    run_binop_test(mnp_subtract, onp_subtract)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multiply():
    """Check mnp.multiply against onp.multiply over the shared binary-op cases."""
    # NOTE: "mnp_mutiply" is the (misspelled) wrapper name defined above.
    run_binop_test(mnp_mutiply, onp_multiply)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_divide():
    """Check mnp.divide against onp.divide over the shared binary-op cases."""
    run_binop_test(mnp_divide, onp_divide)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_power():
    """Check mnp.power against onp.power over the shared binary-op cases."""
    run_binop_test(mnp_power, onp_power)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_inner():
    """Check mnp.inner against onp.inner on last-dim-aligned arrays and scalars."""
    for arr1 in test_case.aligned_arrs:
        for arr2 in test_case.aligned_arrs:
            match_res(mnp_inner, onp_inner, arr1, arr2)

    for scalar1 in test_case.scalars:
        for scalar2 in test_case.scalars:
            match_res(mnp_inner, onp_inner,
                      scalar1, scalar2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_dot():
    """Check mnp.dot against onp.dot across dimensionality combinations."""
    # test case (1D, 1D)
    match_res(mnp_dot, onp_dot, rand_int(3), rand_int(3))

    # test case (2D, 2D)
    match_res(mnp_dot, onp_dot, rand_int(4, 7), rand_int(7, 2))

    # test case (0D, _) (_, 0D)
    match_res(mnp_dot, onp_dot, rand_int(), rand_int(1, 9, 3))
    match_res(mnp_dot, onp_dot, rand_int(8, 5, 6, 3), rand_int())

    # test case (ND, 1D)
    match_res(mnp_dot, onp_dot, rand_int(2, 4, 5), rand_int(5))

    # test case (ND, MD)
    match_res(mnp_dot, onp_dot, rand_int(5, 4, 1, 8), rand_int(8, 3))

    # core_broadcastables holds 16 arrays laid out as 8 consecutive
    # pairs whose core dimensions are dot-compatible
    for i in range(8):
        match_res(mnp_dot, onp_dot,
                  test_case.core_broadcastables[2*i], test_case.core_broadcastables[2*i + 1])
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_outer():
    """Check mnp.outer against onp.outer over the shared binary-op cases."""
    run_binop_test(mnp_outer, onp_outer)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_kwargs():
    """Check mnp.add with `where` and `out` keyword arguments against onp.add."""
    for where in test_case.bool_broadcastables[:2]:
        for x in test_case.broadcastables[:2]:
            for y in test_case.broadcastables[:2]:
                # out must have the fully broadcast shape of (where, x, y)
                shape_out = onp.broadcast(where, x, y).shape
                out = rand_int(*shape_out)
                match_res(mnp_add_kwargs, onp_add_kwargs, x, y, where, out)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_tensordot():
    """Check mnp.tensordot against onp.tensordot for all supported axes forms."""
    # shapes chosen so every axes spec in *_tensordot is contraction-compatible
    x = rand_int(4, 2, 7, 7)
    y = rand_int(7, 7, 6)
    run_multi_test(mnp_tensordot, onp_tensordot, (x, y))
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_type_promotion():
    """Check that mixed-dtype mnp.add results match the plain onp.add sum."""
    arr = rand_int(2, 3)
    onp_sum = onp_add(arr, arr)

    a = mnp.asarray(arr, dtype='float16')
    b = mnp.asarray(arr, dtype='float32')
    c = mnp.asarray(arr, dtype='int32')

    # float16 + float32 and float32 + int32 should both promote to a type
    # whose values equal the reference sum
    match_array(mnp_add(a, b).asnumpy(), onp_sum)
    match_array(mnp_add(b, c).asnumpy(), onp_sum)
# mnp/onp wrapper pair for the unary absolute-value tests.
def mnp_absolute(x):
    return mnp.absolute(x)


def onp_absolute(x):
    return onp.absolute(x)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_absolute():
    """Check mnp.absolute against onp.absolute across dtypes, plus the out/where path."""
    arr = rand_int(2, 3)

    a = mnp.asarray(arr, dtype='float16')
    b = mnp.asarray(arr, dtype='float32')
    c = mnp.asarray(arr, dtype='uint8')
    d = mnp.asarray(arr, dtype='bool')

    match_array(mnp_absolute(a).asnumpy(), onp_absolute(a.asnumpy()))
    match_array(mnp_absolute(b).asnumpy(), onp_absolute(b.asnumpy()))
    match_array(mnp_absolute(c).asnumpy(), onp_absolute(c.asnumpy()))
    match_array(mnp_absolute(d).asnumpy(), onp_absolute(d.asnumpy()))

    # keyword-argument path: masked write into a preallocated output
    where = rand_int(2, 3).astype('bool')
    out = rand_int(2, 3)
    match_array(mnp.absolute(a, out=mnp.asarray(out), where=mnp.asarray(where)).asnumpy(),
                onp.absolute(a.asnumpy(), out=out, where=where))
def mnp_add_dtype(x1, x2, out, where):
    """mnp.add with an explicit float16 dtype under each out/where combination."""
    a = mnp.add(x1, x2, dtype=mnp.float16)
    b = mnp.add(x1, x2, out=out, dtype=mnp.float16)
    c = mnp.add(x1, x2, where=where, dtype=mnp.float16)
    d = mnp.add(x1, x2, out=out, where=where, dtype=mnp.float16)
    return a, b, c, d


def onp_add_dtype(x1, x2, out, where):
    """onp.add with an explicit float16 dtype under each out/where combination."""
    a = onp.add(x1, x2, dtype=onp.float16)
    b = onp.add(x1, x2, out=out, dtype=onp.float16)
    c = onp.add(x1, x2, where=where, dtype=onp.float16)
    d = onp.add(x1, x2, out=out, where=where, dtype=onp.float16)
    return a, b, c, d
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_add_dtype():
    """Check that dtype=float16 on mnp.add yields the same result dtype as onp."""
    x1 = rand_int(2, 3).astype('int32')
    x2 = rand_int(2, 3).astype('int32')
    out = rand_int(2, 3).astype('float32')
    where = rand_bool(2, 3)
    arrs = (x1, x2, out, where)

    mnp_arrs = map(mnp.array, arrs)
    mnp_res = mnp_add_dtype(*mnp_arrs)
    onp_res = onp_add_dtype(*arrs)
    # only the dtypes are compared here, not the values
    for actual, expected in zip(mnp_res, onp_res):
        assert actual.asnumpy().dtype == expected.dtype
# check if the output from mnp function and onp function applied on the arrays are matched
def match_res(mnp_fn, onp_fn, *arrs):
    """Apply mnp_fn and onp_fn to float32 views of arrs and compare the outputs."""
    # mnp gets float32 tensor copies; onp receives the raw inputs
    mnp_arrs = map(partial(mnp.asarray, dtype='float32'), arrs)
    mnp_res = mnp_fn(*mnp_arrs)
    onp_res = onp_fn(*arrs)
    # multi-output functions return a tuple/list; compare element-wise
    if isinstance(mnp_res, (tuple, list)):
        for actual, expected in zip(mnp_res, onp_res):
            match_array(actual.asnumpy(), expected)
    else:
        match_array(mnp_res.asnumpy(), onp_res)
  393. def match_array(actual, expected, error=5):
  394. if error > 0:
  395. onp.testing.assert_almost_equal(actual.tolist(), expected.tolist(),
  396. decimal=error)
  397. else:
  398. onp.testing.assert_equal(actual.tolist(), expected.tolist())
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_innner():
    """mnp.inner should reject operands whose last dimensions differ (here 2 vs 3)."""
    # arrs[0] has shape (2,), arrs[1] has shape (2, 3)
    with pytest.raises(ValueError):
        mnp.inner(mnp.asarray(test_case.arrs[0]),
                  mnp.asarray(test_case.arrs[1]))
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_add():
    """mnp.add should reject non-broadcastable shapes ((2, 3) vs (2, 3, 4))."""
    with pytest.raises(ValueError):
        mnp.add(mnp.asarray(test_case.arrs[1]), mnp.asarray(test_case.arrs[2]))
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_exception_mean():
    """mnp.mean should reject a duplicate axis: on the 1-D arrs[0], -1 and 0 name the same axis."""
    with pytest.raises(ValueError):
        mnp.mean(mnp.asarray(test_case.arrs[0]), (-1, 0))