test_array_ops.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test array ops """
import functools
import numpy as np
import pytest
from mindspore.ops.signature import sig_rw, sig_dtype, make_sig
import mindspore as ms
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn import Cell
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
from mindspore.ops import prim_attr_register
from mindspore.ops.primitive import PrimitiveWithInfer
import mindspore.context as context
from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config
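

# Basic checks for individual array operators (ExpandDims, Cast, Reshape,
# Transpose, Squeeze, InvertPermutation, Select) and for Argmin's
# output-dtype validation.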
def test_expand_dims():
    input_tensor = Tensor(np.array([[2, 2], [2, 2]]))
    expand_dims = P.ExpandDims()
    output = expand_dims(input_tensor, 0)
    assert output.asnumpy().shape == (1, 2, 2)


def test_cast():
    input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
    input_x = Tensor(input_np)
    td = ms.int32
    cast = P.Cast()
    result = cast(input_x, td)
    expect = input_np.astype(np.int32)
    assert np.all(result.asnumpy() == expect)


@non_graph_engine
def test_reshape():
    input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    shp = (3, 2)
    reshape = P.Reshape()
    output = reshape(input_tensor, shp)
    assert output.asnumpy().shape == (3, 2)


def test_transpose():
    input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))
    perm = (0, 2, 1)
    expect = np.array([[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]])
    transpose = P.Transpose()
    output = transpose(input_tensor, perm)
    assert np.all(output.asnumpy() == expect)


def test_squeeze():
    input_tensor = Tensor(np.ones(shape=[3, 2, 1]))
    squeeze = P.Squeeze(2)
    output = squeeze(input_tensor)
    assert output.asnumpy().shape == (3, 2)


def test_invert_permutation():
    invert_permutation = P.InvertPermutation()
    x = (3, 4, 0, 2, 1)
    output = invert_permutation(x)
    expect = (2, 4, 3, 0, 1)
    assert np.all(output == expect)


def test_select():
    select = P.Select()
    cond = Tensor(np.array([[True, False, False], [False, True, True]]))
    x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
    y = Tensor(np.array([[7, 8, 9], [10, 11, 12]]))
    output = select(cond, x, y)
    expect = np.array([[1, 8, 9], [10, 5, 6]])
    assert np.all(output.asnumpy() == expect)


def test_argmin_invalid_output_type():
    P.Argmin(-1, mstype.int64)
    P.Argmin(-1, mstype.int32)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.float32)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.float64)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.uint8)
    with pytest.raises(TypeError):
        P.Argmin(-1, mstype.bool_)
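

# Custom primitives that exercise __mindspore_signature__ handling: CustomOP
# groups parameters with shared dtype placeholders, CustomOP2 declares a
# RW_WRITE parameter via make_sig. Calling them directly raises
# NotImplementedError; only their signatures matter in the graph tests below.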
class CustomOP(PrimitiveWithInfer):
    __mindspore_signature__ = (sig_dtype.T, sig_dtype.T, sig_dtype.T1,
                               sig_dtype.T1, sig_dtype.T2, sig_dtype.T2,
                               sig_dtype.T2, sig_dtype.T3, sig_dtype.T4)

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3, p4, p5, p6, p7, p8, p9):
        raise NotImplementedError


class CustomOP2(PrimitiveWithInfer):
    __mindspore_signature__ = (
        make_sig('p1', sig_rw.RW_WRITE, dtype=sig_dtype.T),
        make_sig('p2', dtype=sig_dtype.T),
        make_sig('p3', dtype=sig_dtype.T),
    )

    @prim_attr_register
    def __init__(self):
        pass

    def __call__(self, p1, p2, p3):
        raise NotImplementedError
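

# Cells that feed mixed Tensor and Python scalar arguments to the custom
# primitives above (and to ReduceSum); they are compiled via test_exec below.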
class CustNet1(Cell):
    def __init__(self):
        super(CustNet1, self).__init__()
        self.op = CustomOP()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.int1 = 3
        self.float1 = 5.1

    def construct(self):
        x = self.op(self.t1, self.t1, self.int1,
                    self.float1, self.int1, self.float1,
                    self.t2, self.t1, self.int1)
        return x


class CustNet2(Cell):
    def __init__(self):
        super(CustNet2, self).__init__()
        self.op = CustomOP2()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.int1 = 3

    def construct(self):
        return self.op(self.t1, self.t2, self.int1)


class CustNet3(Cell):
    def __init__(self):
        super(CustNet3, self).__init__()
        self.op = P.ReduceSum()
        self.t1 = Tensor(np.ones([2, 2]), dtype=ms.int32)
        self.t2 = Tensor(np.ones([1, 5]), dtype=ms.float16)
        self.t2 = 1

    def construct(self):
        return self.op(self.t1, self.t2)
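

# Cells combining binary math and comparison operators with Python scalar
# constants.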
class MathBinaryNet1(Cell):
    def __init__(self):
        super(MathBinaryNet1, self).__init__()
        self.add = P.TensorAdd()
        self.mul = P.Mul()
        self.max = P.Maximum()
        self.number = 3

    def construct(self, x):
        return self.add(x, self.number) + self.mul(x, self.number) + self.max(x, self.number)


class MathBinaryNet2(Cell):
    def __init__(self):
        super(MathBinaryNet2, self).__init__()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.logic_or = P.LogicalOr()
        self.logic_and = P.LogicalAnd()
        self.number = 3
        self.flag = True

    def construct(self, x):
        ret_less_equal = self.logic_and(self.less_equal(x, self.number), self.flag)
        ret_greater = self.logic_or(self.greater(x, self.number), self.flag)
        return self.logic_or(ret_less_equal, ret_greater)
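

# Thin Cell wrappers around single array operators; each is driven by an
# entry in test_case_array_ops below.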
class BatchToSpaceNet(Cell):
    def __init__(self):
        super(BatchToSpaceNet, self).__init__()
        block_size = 2
        crops = [[0, 0], [0, 0]]
        self.batch_to_space = P.BatchToSpace(block_size, crops)

    def construct(self, x):
        return self.batch_to_space(x)


class SpaceToBatchNet(Cell):
    def __init__(self):
        super(SpaceToBatchNet, self).__init__()
        block_size = 2
        paddings = [[0, 0], [0, 0]]
        self.space_to_batch = P.SpaceToBatch(block_size, paddings)

    def construct(self, x):
        return self.space_to_batch(x)


class PackNet(Cell):
    def __init__(self):
        super(PackNet, self).__init__()
        self.pack = P.Pack()

    def construct(self, x):
        return self.pack((x, x))


class UnpackNet(Cell):
    def __init__(self):
        super(UnpackNet, self).__init__()
        self.unpack = P.Unpack()

    def construct(self, x):
        return self.unpack(x)


class SpaceToDepthNet(Cell):
    def __init__(self):
        super(SpaceToDepthNet, self).__init__()
        block_size = 2
        self.space_to_depth = P.SpaceToDepth(block_size)

    def construct(self, x):
        return self.space_to_depth(x)


class DepthToSpaceNet(Cell):
    def __init__(self):
        super(DepthToSpaceNet, self).__init__()
        block_size = 2
        self.depth_to_space = P.DepthToSpace(block_size)

    def construct(self, x):
        return self.depth_to_space(x)


class BatchToSpaceNDNet(Cell):
    def __init__(self):
        super(BatchToSpaceNDNet, self).__init__()
        block_shape = [2, 2]
        crops = [[0, 0], [0, 0]]
        self.batch_to_space_nd = P.BatchToSpaceND(block_shape, crops)

    def construct(self, x):
        return self.batch_to_space_nd(x)


class SpaceToBatchNDNet(Cell):
    def __init__(self):
        super(SpaceToBatchNDNet, self).__init__()
        block_shape = [2, 2]
        paddings = [[0, 0], [0, 0]]
        self.space_to_batch_nd = P.SpaceToBatchND(block_shape, paddings)

    def construct(self, x):
        return self.space_to_batch_nd(x)


class RangeNet(Cell):
    def __init__(self):
        super(RangeNet, self).__init__()
        self.range_ops = inner.Range(1.0, 8.0, 2.0)

    def construct(self, x):
        return self.range_ops(x)
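

# Case-by-case test configuration: 'block' is the network under test and
# 'desc_inputs' lists the tensors passed to its construct().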
test_case_array_ops = [
    ('CustNet1', {
        'block': CustNet1(),
        'desc_inputs': []}),
    ('CustNet2', {
        'block': CustNet2(),
        'desc_inputs': []}),
    ('CustNet3', {
        'block': CustNet3(),
        'desc_inputs': []}),
    ('MathBinaryNet1', {
        'block': MathBinaryNet1(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('MathBinaryNet2', {
        'block': MathBinaryNet2(),
        'desc_inputs': [Tensor(np.ones([2, 2]), dtype=ms.int32)]}),
    ('BatchToSpaceNet', {
        'block': BatchToSpaceNet(),
        'desc_inputs': [Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]).astype(np.float16))]}),
    ('SpaceToBatchNet', {
        'block': SpaceToBatchNet(),
        'desc_inputs': [Tensor(np.array([[[[1, 2], [3, 4]]]]).astype(np.float16))]}),
    ('PackNet', {
        'block': PackNet(),
        'desc_inputs': [Tensor(np.array([[[1, 2], [3, 4]]]).astype(np.float16))]}),
    ('UnpackNet', {
        'block': UnpackNet(),
        'desc_inputs': [Tensor(np.array([[1, 2], [3, 4]]).astype(np.float16))]}),
    ('SpaceToDepthNet', {
        'block': SpaceToDepthNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 3, 2, 2).astype(np.float16))]}),
    ('DepthToSpaceNet', {
        'block': DepthToSpaceNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 12, 1, 1).astype(np.float16))]}),
    ('SpaceToBatchNDNet', {
        'block': SpaceToBatchNDNet(),
        'desc_inputs': [Tensor(np.random.rand(1, 1, 2, 2).astype(np.float16))]}),
    ('BatchToSpaceNDNet', {
        'block': BatchToSpaceNDNet(),
        'desc_inputs': [Tensor(np.random.rand(4, 1, 1, 1).astype(np.float16))]}),
    ('RangeNet', {
        'block': RangeNet(),
        'desc_inputs': [Tensor(np.array([1, 2, 3, 2]), ms.int32)]}),
]
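

# Aggregate the cases above and compile them in graph mode through the GE
# case-by-case forward pipeline (test_exec).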
test_case_lists = [test_case_array_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)
# use -k to select certain testcase
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case
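

# Negative cases: each 'block' pairs a lambda that constructs the primitive
# with an invalid argument and the exception type the verification pipeline
# expects it to raise.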
raise_set = [
    ('Squeeze_1_Error', {
        'block': (lambda x: P.Squeeze(axis=1.2), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('Squeeze_2_Error', {
        'block': (lambda x: P.Squeeze(axis=((1.2, 1.3))), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
    ('ReduceSum_Error', {
        'block': (lambda x: P.ReduceSum(keep_dims=1), {'exception': TypeError}),
        'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}),
]


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return raise_set