test_array_ops.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test array ops """
import functools
import numpy as np
import pytest
from mindspore.ops.signature import sig_rw, sig_dtype, make_sig
import mindspore as ms
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn import Cell
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
from mindspore.ops import prim_attr_register
from mindspore.ops.primitive import PrimitiveWithInfer
import mindspore.context as context
from ..ut_filter import non_graph_engine

from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config


def test_expand_dims():
    input_tensor = Tensor(np.array([[2, 2], [2, 2]]))
    expand_dims = P.ExpandDims()
    output = expand_dims(input_tensor, 0)
    assert output.asnumpy().shape == (1, 2, 2)


def test_cast():
    input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_x = Tensor(input_np)
    td = ms.int32
    cast = P.Cast()
    result = cast(input_x, td)
    expect = input_np.astype(np.int32)
    assert np.all(result.asnumpy() == expect)


def test_ones():
    ones = P.Ones()
    output = ones((2, 3), mstype.int32)
    assert output.asnumpy().shape == (2, 3)
    assert np.sum(output.asnumpy()) == 6


def test_zeros():
    zeros = P.Zeros()
    output = zeros((2, 3), mstype.int32)
    assert output.asnumpy().shape == (2, 3)
    assert np.sum(output.asnumpy()) == 0


@non_graph_engine
def test_reshape():
    input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    shp = (3, 2)
    reshape = P.Reshape()
    output = reshape(input_tensor, shp)
    assert output.asnumpy().shape == (3, 2)


def test_transpose():
    input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))
    perm = (0, 2, 1)
    expect = np.array([[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]])
    transpose = P.Transpose()
    output = transpose(input_tensor, perm)
    assert np.all(output.asnumpy() == expect)


def test_squeeze():
    input_tensor = Tensor(np.ones(shape=[3, 2, 1]))
    squeeze = P.Squeeze(2)
    output = squeeze(input_tensor)
    assert output.asnumpy().shape == (3, 2)


def test_invert_permutation():
    invert_permutation = P.InvertPermutation()
    x = (3, 4, 0, 2, 1)
    output = invert_permutation(x)
    expect = (2, 4, 3, 0, 1)
    assert np.all(output == expect)
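

# The InvertPermutation semantics checked above can be cross-checked with plain NumPy:
# argsort of a permutation yields its inverse. This helper is an illustrative addition,
# not part of the original suite, and is only meant as a reference implementation.
def _numpy_invert_permutation(perm):
    """Return the inverse of a permutation, e.g. (3, 4, 0, 2, 1) -> (2, 4, 3, 0, 1)."""
    return tuple(int(i) for i in np.argsort(np.asarray(perm)))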


def test_select():
    select = P.Select()
    cond = Tensor(np.array([[True, False, False], [False, True, True]]))
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
    y = Tensor(np.array([[7, 8, 9], [10, 11, 12]]))
    output = select(cond, x, y)
    expect = np.array([[1, 8, 9], [10, 5, 6]])
    assert np.all(output.asnumpy() == expect)


def test_argmin_invalid_output_type():
    P.Argmin(-1, mstype.int64)
    P.Argmin(-1, mstype.int32)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.float32)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.float64)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.uint8)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.bool_)
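

# The custom primitives below only declare a __mindspore_signature__; they exist to
# exercise signature handling (dtype grouping and read/write flags) rather than to
# compute anything, which is why __call__ raises NotImplementedError.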


class CustomOP(PrimitiveWithInfer):
    __mindspore_signature__ = (sig_dtype.T, sig_dtype.T, sig_dtype.T1,
                               sig_dtype.T1, sig_dtype.T2, sig_dtype.T2,
                               sig_dtype.T2, sig_dtype.T3, sig_dtype.T4)

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3, p4, p5, p6, p7, p8, p9):
        raise NotImplementedError


class CustomOP2(PrimitiveWithInfer):
    __mindspore_signature__ = (
        make_sig('p1', sig_rw.RW_WRITE, dtype=sig_dtype.T),
        make_sig('p2', dtype=sig_dtype.T),
        make_sig('p3', dtype=sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3):
        raise NotImplementedError


class CustNet1(Cell):
    def __init__(self):
        super(CustNet1, self).__init__()
        self.op = CustomOP()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.int1 = 3
        self.float1 = 5.1

    def construct(self):
        x = self.op(self.t1, self.t1, self.int1,
                    self.float1, self.int1, self.float1,
                    self.t2, self.t1, self.int1)
        return x


class CustNet2(Cell):
    def __init__(self):
        super(CustNet2, self).__init__()
        self.op = CustomOP2()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.int1 = 3

    def construct(self):
        return self.op(self.t1, self.t2, self.int1)


class CustNet3(Cell):
    def __init__(self):
        super(CustNet3, self).__init__()
        self.op = P.ReduceSum()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        # The re-assignment below replaces the tensor with a plain int, so ReduceSum
        # receives a scalar axis argument in construct.
        self.t2 = 1

    def construct(self):
        return self.op(self.t1, self.t2)


class MathBinaryNet1(Cell):
    def __init__(self):
        super(MathBinaryNet1, self).__init__()
        self.add = P.TensorAdd()
        self.mul = P.Mul()
        self.max = P.Maximum()
        self.number = 3

    def construct(self, x):
        return self.add(x, self.number) + self.mul(x, self.number) + self.max(x, self.number)


class MathBinaryNet2(Cell):
    def __init__(self):
        super(MathBinaryNet2, self).__init__()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.logic_or = P.LogicalOr()
        self.logic_and = P.LogicalAnd()
        self.number = 3
        self.flag = True

    def construct(self, x):
        ret_less_equal = self.logic_and(self.less_equal(x, self.number), self.flag)
        ret_greater = self.logic_or(self.greater(x, self.number), self.flag)
        return self.logic_or(ret_less_equal, ret_greater)


class BatchToSpaceNet(Cell):
    def __init__(self):
        super(BatchToSpaceNet, self).__init__()
        block_size = 2
        crops = [[0, 0], [0, 0]]
        self.batch_to_space = P.BatchToSpace(block_size, crops)

    def construct(self, x):
        return self.batch_to_space(x)


class SpaceToBatchNet(Cell):
    def __init__(self):
        super(SpaceToBatchNet, self).__init__()
        block_size = 2
        paddings = [[0, 0], [0, 0]]
        self.space_to_batch = P.SpaceToBatch(block_size, paddings)

    def construct(self, x):
        return self.space_to_batch(x)


class PackNet(Cell):
    def __init__(self):
        super(PackNet, self).__init__()
        self.pack = P.Pack()

    def construct(self, x):
        return self.pack((x, x))


class UnpackNet(Cell):
    def __init__(self):
        super(UnpackNet, self).__init__()
        self.unpack = P.Unpack()

    def construct(self, x):
        return self.unpack(x)


class SpaceToDepthNet(Cell):
    def __init__(self):
        super(SpaceToDepthNet, self).__init__()
        block_size = 2
        self.space_to_depth = P.SpaceToDepth(block_size)

    def construct(self, x):
        return self.space_to_depth(x)


class DepthToSpaceNet(Cell):
    def __init__(self):
        super(DepthToSpaceNet, self).__init__()
        block_size = 2
        self.depth_to_space = P.DepthToSpace(block_size)

    def construct(self, x):
        return self.depth_to_space(x)


class BatchToSpaceNDNet(Cell):
    def __init__(self):
        super(BatchToSpaceNDNet, self).__init__()
        block_shape = [2, 2]
        crops = [[0, 0], [0, 0]]
        self.batch_to_space_nd = P.BatchToSpaceND(block_shape, crops)

    def construct(self, x):
        return self.batch_to_space_nd(x)


class SpaceToBatchNDNet(Cell):
    def __init__(self):
        super(SpaceToBatchNDNet, self).__init__()
        block_shape = [2, 2]
        paddings = [[0, 0], [0, 0]]
        self.space_to_batch_nd = P.SpaceToBatchND(block_shape, paddings)

    def construct(self, x):
        return self.space_to_batch_nd(x)


class RangeNet(Cell):
    def __init__(self):
        super(RangeNet, self).__init__()
        self.range_ops = inner.Range(1.0, 8.0, 2.0)

    def construct(self, x):
        return self.range_ops(x)
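

# Each entry below pairs a Cell ('block') with its 'desc_inputs'; the case-by-case
# forward-compile pipeline builds and compiles the block with those inputs.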
test_case_array_ops = [
    ('CustNet1', {
        'block': CustNet1(),
        'desc_inputs': []}),
    ('CustNet2', {
        'block': CustNet2(),
        'desc_inputs': []}),
    ('CustNet3', {
        'block': CustNet3(),
        'desc_inputs': []}),
    ('MathBinaryNet1', {
        'block': MathBinaryNet1(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('MathBinaryNet2', {
        'block': MathBinaryNet2(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('BatchToSpaceNet', {
        'block': BatchToSpaceNet(),
        'desc_inputs': [Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]).astype(np.float16))]}),
    ('SpaceToBatchNet', {
        'block': SpaceToBatchNet(),
        'desc_inputs': [Tensor(np.array([[[[1, 2], [3, 4]]]]).astype(np.float16))]}),
    ('PackNet', {
        'block': PackNet(),
        'desc_inputs': [Tensor(np.array([[[1, 2], [3, 4]]]).astype(np.float16))]}),
    ('UnpackNet', {
        'block': UnpackNet(),
        'desc_inputs': [Tensor(np.array([[1, 2], [3, 4]]).astype(np.float16))]}),
    ('SpaceToDepthNet', {
        'block': SpaceToDepthNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 3, 2, 2).astype(np.float16))]}),
    ('DepthToSpaceNet', {
        'block': DepthToSpaceNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 12, 1, 1).astype(np.float16))]}),
    ('SpaceToBatchNDNet', {
        'block': SpaceToBatchNDNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2).astype(np.float16))]}),
    ('BatchToSpaceNDNet', {
        'block': BatchToSpaceNDNet(),
        'desc_inputs': [Tensor(np.random.rand(4, 1, 1, 1).astype(np.float16))]}),
    ('RangeNet', {
        'block': RangeNet(),
        'desc_inputs': [Tensor(np.array([1, 2, 3, 2]), ms.int32)]}),
]

test_case_lists = [test_case_array_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)


# use -k to select certain testcase
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm
@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case
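

# Each entry below pairs a block with the exception it is expected to raise; the
# verify-exception pipeline asserts that the exception actually occurs.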
raise_set = [
    ('Squeeze_1_Error', {
        'block': (lambda x: P.Squeeze(axis=1.2), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('Squeeze_2_Error', {
        'block': (lambda x: P.Squeeze(axis=((1.2, 1.3))), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('ReduceSum_Error', {
        'block': (lambda x: P.ReduceSum(keep_dims=1), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
]


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return raise_set
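

# A minimal sketch of how one of the blocks above could be run directly, outside the
# mindspore_test pipelines. This is an illustrative addition, not part of the original
# suite, and it assumes a backend that implements BatchToSpace for PyNative execution.
def _run_batch_to_space_example():
    context.set_context(mode=context.PYNATIVE_MODE)
    net = BatchToSpaceNet()
    # Input of shape (4, 1, 1, 1); BatchToSpace with block_size=2 folds the batch
    # dimension into the spatial dimensions, giving an output of shape (1, 1, 2, 2).
    x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]).astype(np.float16))
    out = net(x)
    assert out.asnumpy().shape == (1, 1, 2, 2)
    return out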