You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_array_ops.py 14 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """ test array ops """
  16. import functools
  17. import numpy as np
  18. import pytest
  19. import mindspore as ms
  20. import mindspore.context as context
  21. from mindspore import Tensor
  22. from mindspore.common import dtype as mstype
  23. from mindspore.nn import Cell
  24. from mindspore import ops
  25. from mindspore.ops import operations as P
  26. from mindspore.ops import prim_attr_register
  27. from mindspore.ops.operations import _inner_ops as inner
  28. from mindspore.ops.primitive import PrimitiveWithInfer
  29. from mindspore.ops.signature import sig_rw, sig_dtype, make_sig
  30. from ..ut_filter import non_graph_engine
  31. from ....mindspore_test_framework.mindspore_test import mindspore_test
  32. from ....mindspore_test_framework.pipeline.forward.compile_forward \
  33. import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
  34. from ....mindspore_test_framework.pipeline.forward.verify_exception \
  35. import pipeline_for_verify_exception_for_case_by_case_config
# Default to eager (PyNative) execution for this module; test_exec switches
# to GRAPH_MODE explicitly before returning its case list.
context.set_context(mode=context.PYNATIVE_MODE)
  37. def test_expand_dims():
  38. input_tensor = Tensor(np.array([[2, 2], [2, 2]]))
  39. expand_dims = P.ExpandDims()
  40. output = expand_dims(input_tensor, 0)
  41. assert output.asnumpy().shape == (1, 2, 2)
  42. def test_cast():
  43. input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
  44. input_x = Tensor(input_np)
  45. td = ms.int32
  46. cast = P.Cast()
  47. result = cast(input_x, td)
  48. expect = input_np.astype(np.int32)
  49. assert np.all(result.asnumpy() == expect)
  50. def test_ones():
  51. ones = P.Ones()
  52. output = ones((2, 3), mstype.int32)
  53. assert output.asnumpy().shape == (2, 3)
  54. assert np.sum(output.asnumpy()) == 6
  55. def test_ones_1():
  56. ones = P.Ones()
  57. output = ones(2, mstype.int32)
  58. assert output.asnumpy().shape == (2,)
  59. assert np.sum(output.asnumpy()) == 2
  60. def test_zeros():
  61. zeros = P.Zeros()
  62. output = zeros((2, 3), mstype.int32)
  63. assert output.asnumpy().shape == (2, 3)
  64. assert np.sum(output.asnumpy()) == 0
  65. def test_zeros_1():
  66. zeros = P.Zeros()
  67. output = zeros(2, mstype.int32)
  68. assert output.asnumpy().shape == (2,)
  69. assert np.sum(output.asnumpy()) == 0
  70. @non_graph_engine
  71. def test_reshape():
  72. input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
  73. shp = (3, 2)
  74. reshape = P.Reshape()
  75. output = reshape(input_tensor, shp)
  76. assert output.asnumpy().shape == (3, 2)
  77. def test_transpose():
  78. input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))
  79. perm = (0, 2, 1)
  80. expect = np.array([[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]])
  81. transpose = P.Transpose()
  82. output = transpose(input_tensor, perm)
  83. assert np.all(output.asnumpy() == expect)
  84. def test_squeeze():
  85. input_tensor = Tensor(np.ones(shape=[3, 2, 1]))
  86. squeeze = P.Squeeze(2)
  87. output = squeeze(input_tensor)
  88. assert output.asnumpy().shape == (3, 2)
  89. def test_invert_permutation():
  90. invert_permutation = P.InvertPermutation()
  91. x = (3, 4, 0, 2, 1)
  92. output = invert_permutation(x)
  93. expect = (2, 4, 3, 0, 1)
  94. assert np.all(output == expect)
  95. def test_select():
  96. select = P.Select()
  97. cond = Tensor(np.array([[True, False, False], [False, True, True]]))
  98. x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
  99. y = Tensor(np.array([[7, 8, 9], [10, 11, 12]]))
  100. output = select(cond, x, y)
  101. expect = np.array([[1, 8, 9], [10, 5, 6]])
  102. assert np.all(output.asnumpy() == expect)
  103. def test_argmin_invalid_output_type():
  104. P.Argmin(-1, mstype.int64)
  105. P.Argmin(-1, mstype.int32)
  106. with pytest.raises(TypeError):
  107. P.Argmin(-1, mstype.float32)
  108. with pytest.raises(TypeError):
  109. P.Argmin(-1, mstype.float64)
  110. with pytest.raises(TypeError):
  111. P.Argmin(-1, mstype.uint8)
  112. with pytest.raises(TypeError):
  113. P.Argmin(-1, mstype.bool_)
class CustomOP(PrimitiveWithInfer):
    """Inert primitive used only to exercise dtype-signature matching.

    The nine entries of ``__mindspore_signature__`` group the call arguments
    into dtype classes (T, T1, T2, T3, T4); arguments sharing a class must
    resolve to one common dtype. Entry order is significant and must line up
    positionally with the parameters of ``__call__``.
    """
    __mindspore_signature__ = (sig_dtype.T, sig_dtype.T, sig_dtype.T1,
                               sig_dtype.T1, sig_dtype.T2, sig_dtype.T2,
                               sig_dtype.T2, sig_dtype.T3, sig_dtype.T4)

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3, p4, p5, p6, p7, p8, p9):
        # Only signature resolution is under test; execution is unsupported.
        raise NotImplementedError
class CustomOP2(PrimitiveWithInfer):
    """Inert primitive exercising make_sig-based signatures.

    ``p1`` is declared read-write (RW_WRITE); all three parameters share the
    same dtype class T. Entry order must match the ``__call__`` parameters.
    """
    __mindspore_signature__ = (
        make_sig('p1', sig_rw.RW_WRITE, dtype=sig_dtype.T),
        make_sig('p2', dtype=sig_dtype.T),
        make_sig('p3', dtype=sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3):
        # Only signature resolution is under test; execution is unsupported.
        raise NotImplementedError
  134. class CustNet1(Cell):
  135. def __init__(self):
  136. super(CustNet1, self).__init__()
  137. self.op = CustomOP()
  138. self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
  139. self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
  140. self.int1 = 3
  141. self.float1 = 5.1
  142. def construct(self):
  143. x = self.op(self.t1, self.t1, self.int1,
  144. self.float1, self.int1, self.float1,
  145. self.t2, self.t1, self.int1)
  146. return x
  147. class CustNet2(Cell):
  148. def __init__(self):
  149. super(CustNet2, self).__init__()
  150. self.op = CustomOP2()
  151. self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
  152. self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
  153. self.int1 = 3
  154. def construct(self):
  155. return self.op(self.t1, self.t2, self.int1)
class CustNet3(Cell):
    """Feeds ReduceSum a tensor plus a scalar second argument."""

    def __init__(self):
        super(CustNet3, self).__init__()
        self.op = P.ReduceSum()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        # NOTE(review): self.t2 is assigned twice — the Tensor below is dead
        # and t2 ends up as the int 1 (the ReduceSum axis). Presumably
        # intentional for this test; confirm before cleaning up.
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.t2 = 1

    def construct(self):
        return self.op(self.t1, self.t2)
  165. class MathBinaryNet1(Cell):
  166. def __init__(self):
  167. super(MathBinaryNet1, self).__init__()
  168. self.add = P.Add()
  169. self.mul = P.Mul()
  170. self.max = P.Maximum()
  171. self.number = 3
  172. def construct(self, x):
  173. return self.add(x, self.number) + self.mul(x, self.number) + self.max(x, self.number)
  174. class MathBinaryNet2(Cell):
  175. def __init__(self):
  176. super(MathBinaryNet2, self).__init__()
  177. self.less_equal = P.LessEqual()
  178. self.greater = P.Greater()
  179. self.logic_or = P.LogicalOr()
  180. self.logic_and = P.LogicalAnd()
  181. self.number = 3
  182. self.flag = True
  183. def construct(self, x):
  184. ret_less_equal = self.logic_and(self.less_equal(x, self.number), self.flag)
  185. ret_greater = self.logic_or(self.greater(x, self.number), self.flag)
  186. return self.logic_or(ret_less_equal, ret_greater)
  187. class BatchToSpaceNet(Cell):
  188. def __init__(self):
  189. super(BatchToSpaceNet, self).__init__()
  190. block_size = 2
  191. crops = [[0, 0], [0, 0]]
  192. self.batch_to_space = P.BatchToSpace(block_size, crops)
  193. def construct(self, x):
  194. return self.batch_to_space(x)
  195. class SpaceToBatchNet(Cell):
  196. def __init__(self):
  197. super(SpaceToBatchNet, self).__init__()
  198. block_size = 2
  199. paddings = [[0, 0], [0, 0]]
  200. self.space_to_batch = P.SpaceToBatch(block_size, paddings)
  201. def construct(self, x):
  202. return self.space_to_batch(x)
  203. class PackNet(Cell):
  204. def __init__(self):
  205. super(PackNet, self).__init__()
  206. self.stack = P.Stack()
  207. def construct(self, x):
  208. return self.stack((x, x))
  209. class UnpackNet(Cell):
  210. def __init__(self):
  211. super(UnpackNet, self).__init__()
  212. self.unstack = P.Unstack()
  213. def construct(self, x):
  214. return self.unstack(x)
  215. class SpaceToDepthNet(Cell):
  216. def __init__(self):
  217. super(SpaceToDepthNet, self).__init__()
  218. block_size = 2
  219. self.space_to_depth = P.SpaceToDepth(block_size)
  220. def construct(self, x):
  221. return self.space_to_depth(x)
  222. class DepthToSpaceNet(Cell):
  223. def __init__(self):
  224. super(DepthToSpaceNet, self).__init__()
  225. block_size = 2
  226. self.depth_to_space = P.DepthToSpace(block_size)
  227. def construct(self, x):
  228. return self.depth_to_space(x)
  229. class BatchToSpaceNDNet(Cell):
  230. def __init__(self):
  231. super(BatchToSpaceNDNet, self).__init__()
  232. block_shape = [2, 2]
  233. crops = [[0, 0], [0, 0]]
  234. self.batch_to_space_nd = P.BatchToSpaceND(block_shape, crops)
  235. def construct(self, x):
  236. return self.batch_to_space_nd(x)
  237. class SpaceToBatchNDNet(Cell):
  238. def __init__(self):
  239. super(SpaceToBatchNDNet, self).__init__()
  240. block_shape = [2, 2]
  241. paddings = [[0, 0], [0, 0]]
  242. self.space_to_batch_nd = P.SpaceToBatchND(block_shape, paddings)
  243. def construct(self, x):
  244. return self.space_to_batch_nd(x)
  245. class TensorShapeNet(Cell):
  246. def __init__(self):
  247. super(TensorShapeNet, self).__init__()
  248. self.shape = P.TensorShape()
  249. self.unique = P.Unique()
  250. def construct(self, x):
  251. x, _ = self.unique(x)
  252. return self.shape(x)
  253. class UniqueFunc1(Cell):
  254. def __init__(self):
  255. super(UniqueFunc1, self).__init__()
  256. self.unique = ops.unique
  257. def construct(self, x):
  258. y, idx = self.unique(x)
  259. return y, idx
  260. class UniqueFunc2(Cell):
  261. def __init__(self):
  262. super(UniqueFunc2, self).__init__()
  263. self.unique = ops.unique
  264. def construct(self, x):
  265. y, idx = self.unique(x)
  266. return y, idx
  267. class RangeNet(Cell):
  268. def __init__(self):
  269. super(RangeNet, self).__init__()
  270. self.range_ops = inner.Range(1.0, 8.0, 2.0)
  271. def construct(self, x):
  272. return self.range_ops(x)
# Forward-compile cases consumed by test_exec: each entry pairs a case name
# with the Cell under test ('block') and its inputs ('desc_inputs'). Entry
# order is preserved by the pipeline.
test_case_array_ops = [
    ('CustNet1', {
        'block': CustNet1(),
        'desc_inputs': []}),
    ('CustNet2', {
        'block': CustNet2(),
        'desc_inputs': []}),
    ('CustNet3', {
        'block': CustNet3(),
        'desc_inputs': []}),
    ('Unique', {
        'block': UniqueFunc1(),
        'desc_inputs': [Tensor(np.array([2, 2, 1]), dtype=ms.int32)]}),
    ('Unique', {
        'block': UniqueFunc2(),
        'desc_inputs': [Tensor(np.array([[2, 2], [1, 3]]), dtype=ms.int32)]}),
    ('MathBinaryNet1', {
        'block': MathBinaryNet1(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('MathBinaryNet2', {
        'block': MathBinaryNet2(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('BatchToSpaceNet', {
        'block': BatchToSpaceNet(),
        'desc_inputs': [Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]).astype(np.float16))]}),
    ('SpaceToBatchNet', {
        'block': SpaceToBatchNet(),
        'desc_inputs': [Tensor(np.array([[[[1, 2], [3, 4]]]]).astype(np.float16))]}),
    ('PackNet', {
        'block': PackNet(),
        'desc_inputs': [Tensor(np.array([[[1, 2], [3, 4]]]).astype(np.float16))]}),
    ('UnpackNet', {
        'block': UnpackNet(),
        'desc_inputs': [Tensor(np.array([[1, 2], [3, 4]]).astype(np.float16))]}),
    ('SpaceToDepthNet', {
        'block': SpaceToDepthNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 3, 2, 2).astype(np.float16))]}),
    ('DepthToSpaceNet', {
        'block': DepthToSpaceNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 12, 1, 1).astype(np.float16))]}),
    ('SpaceToBatchNDNet', {
        'block': SpaceToBatchNDNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2).astype(np.float16))]}),
    ('BatchToSpaceNDNet', {
        'block': BatchToSpaceNDNet(),
        'desc_inputs': [Tensor(np.random.rand(4, 1, 1, 1).astype(np.float16))]}),
    ('RangeNet', {
        'block': RangeNet(),
        'desc_inputs': [Tensor(np.array([1, 2, 3, 2]), ms.int32)]}),
    ('TensorShapeNet', {'block': TensorShapeNet(), 'desc_inputs': [Tensor(np.array([1, 2, 3, 2]), ms.int32)]})
]
  324. test_case_lists = [test_case_array_ops]
  325. test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select certain testcase
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    """Hand every case in test_exec_case to the forward-compile pipeline,
    switching the module's PyNative default over to graph mode first."""
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case
# Negative cases: each entry pairs a builder lambda with the exception type
# the verify-exception pipeline expects it to raise.
raise_set = [
    ('Squeeze_1_Error', {
        'block': (lambda x: P.Squeeze(axis=1.2), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('Squeeze_2_Error', {
        'block': (lambda x: P.Squeeze(axis=((1.2, 1.3))), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('ReduceSum_Error', {
        'block': (lambda x: P.ReduceSum(keep_dims=1), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    # NOTE(review): 'TensorSHape' looks like a typo for 'TensorShape'; as
    # written the lambda raises AttributeError rather than the declared
    # TypeError — confirm against the exception pipeline's matching rules.
    ('TensorShapeNet_Error', {'block': (lambda x: P.TensorSHape(), {'exception': TypeError}),
                              'desc_inputs': [(Tensor(np.ones(shape=[3, 1, 5])), Tensor(np.ones(shape=[3, 1, 5])))]})
]
@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    """Hand raise_set to the exception-verification pipeline."""
    return raise_set