
test_math_ops.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test math ops """
import functools

import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore.common import dtype as mstype
from mindspore.ops import prim_attr_register, PrimitiveWithInfer
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import functional as F
import mindspore.context as context

from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config

# pylint: disable=W0613
# pylint: disable=W0231
# W0613: unused-argument
# W0231: super-init-not-called


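# The element-wise tests below share one pattern: instantiate the operator from
# mindspore.ops.operations (P), call it eagerly on Tensor inputs, and compare
# against a NumPy reference. Floating-point results are checked with two
# one-sided bounds (diff < tol and -diff < tol), i.e. |diff| < 1e-6, instead of
# exact equality.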
def test_multiply():
    """ test_multiply """
    input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]))
    input_y = Tensor(np.array([[0.1, 0.3, -3.6], [0.4, 0.5, -3.2]]))

    mul = P.Mul()
    result = mul(input_x, input_y)
    expect = np.array([[-0.01, 0.09, -12.96], [0.16, 0.25, 10.24]])
    diff = result.asnumpy() - expect
    error = np.ones(shape=[2, 3]) * 1.0e-6
    assert np.all(diff < error)
    assert np.all(-diff < error)


def test_sub():
    """ test_sub """
    input_x = Tensor(np.ones(shape=[3]))
    input_y = Tensor(np.zeros(shape=[3]))

    sub = P.Sub()
    result = sub(input_x, input_y)
    expect = np.ones(shape=[3])
    assert np.all(result.asnumpy() == expect)


def test_square():
    """ test_square """
    input_tensor = Tensor(np.array([[1, 2, 3], [4, 5, 6]]))
    square = P.Square()
    result = square(input_tensor)
    expect = np.array([[1, 4, 9], [16, 25, 36]])
    assert np.all(result.asnumpy() == expect)


def test_sqrt():
    """ test_sqrt """
    input_tensor = Tensor(np.array([[4, 4], [9, 9]]))
    sqrt = P.Sqrt()
    expect = np.array([[2, 2], [3, 3]])
    result = sqrt(input_tensor)
    assert np.all(result.asnumpy() == expect)


def test_pow():
    """ test_pow """
    input_tensor = Tensor(np.array([[2, 2], [3, 3]]))
    power = Tensor(np.array(3.0, np.int64))
    testpow = P.Pow()
    expect = np.array([[8, 8], [27, 27]])
    result = testpow(input_tensor, power)
    assert np.all(result.asnumpy() == expect)


def test_exp():
    """ test_exp """
    input_tensor = Tensor(np.array([[2, 2], [3, 3]]))
    testexp = P.Exp()
    result = testexp(input_tensor)
    expect = np.exp(np.array([[2, 2], [3, 3]]))
    assert np.all(result.asnumpy() == expect)


def test_realdiv():
    """ test_realdiv """
    x = Tensor(2048.0)
    y = Tensor(128.0)
    div = P.RealDiv()
    result = div(x, y)
    x = x.asnumpy()
    y = y.asnumpy()
    expect = x / y
    assert np.all(result.asnumpy() == expect)


def test_eye():
    """ test_eye """
    x = np.arange(3)
    expect = np.ones_like(x)
    expect = np.diag(expect)
    eye = P.Eye()
    eye_output = eye(3, 3, ms.float32)
    assert np.all(eye_output.asnumpy() == expect)


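# The primitives and cells below are scaffolding for the parameterized cases in
# test_case_math_ops. VirtualLoss (with its VirtualLossGrad bprop) is an
# infer-only stand-in that gives a network a loss-shaped scalar output so the
# graph can be differentiated without computing a real loss.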
class VirtualLossGrad(PrimitiveWithInfer):
    """ VirtualLossGrad definition """

    @prim_attr_register
    def __init__(self):
        """init VirtualLossGrad"""

    def __call__(self, x, out, dout):
        raise NotImplementedError

    def infer_shape(self, x_shape, out_shape, dout_shape):
        return x_shape

    def infer_dtype(self, x_dtype, out_dtype, dout_dtype):
        return x_dtype


class VirtualLoss(PrimitiveWithInfer):
    """ VirtualLoss definition """

    @prim_attr_register
    def __init__(self):
        """init VirtualLoss"""

    def __call__(self, x):
        raise NotImplementedError

    def get_bprop(self):
        loss_grad = VirtualLossGrad()

        def bprop(x, out, dout):
            dx = loss_grad(x, out, dout)
            return (dx,)
        return bprop

    def infer_shape(self, x_shape):
        return [1]

    def infer_dtype(self, x_dtype):
        return x_dtype


class NetWithLoss(nn.Cell):
    """ NetWithLoss definition """

    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y, b):
        predict = self.network(x, y, b)
        return self.loss(predict)


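# The GradWrap* cells differentiate the wrapped loss network with C.grad. As
# configured here (default composite settings, an assumption about this
# MindSpore version's behavior), the call returns the gradient with respect to
# the first input only; C.grad_all would return gradients for every input.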
class GradWrap(nn.Cell):
    """ GradWrap definition """

    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, b):
        return C.grad(self.network)(x, y, b)


class MatMulNet(nn.Cell):
    """ MatMulNet definition """

    def __init__(self):
        super(MatMulNet, self).__init__()
        self.matmul = P.MatMul()
        self.biasAdd = P.BiasAdd()

    def construct(self, x, y, b):
        return self.biasAdd(self.matmul(x, y), b)


class NetWithLossSub(nn.Cell):
    """ NetWithLossSub definition """

    def __init__(self, network):
        super(NetWithLossSub, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y):
        predict = self.network(x, y)
        return self.loss(predict)


class GradWrapSub(nn.Cell):
    """ GradWrapSub definition """

    def __init__(self, network):
        super(GradWrapSub, self).__init__()
        self.network = network

    def construct(self, x, y):
        return C.grad(self.network)(x, y)


class SubNet(nn.Cell):
    """ SubNet definition """

    def __init__(self):
        super(SubNet, self).__init__()
        self.sub = P.Sub()

    def construct(self, x, y):
        return self.sub(x, y)


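# NpuFloatNet exercises the Ascend float-status primitives used for overflow
# detection: allocate a status buffer, clear it, run the computation, then read
# the status back and reduce it to a single flag. A non-zero flag means a
# floating-point exception (e.g. fp16 overflow at 65504) was recorded, in which
# case the output is replaced with zeros via Select. add_flags(has_effect=True)
# marks the cell as side-effecting so graph optimization keeps the status ops
# in order.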
class NpuFloatNet(nn.Cell):
    """ NpuFloatNet definition """

    def __init__(self):
        super(NpuFloatNet, self).__init__()
        self.mul = P.Mul()
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_status = P.NPUClearFloatStatus()
        self.fill = P.Fill()
        self.shape_op = P.Shape()
        self.select = P.Select()
        self.less = P.Less()
        self.cast = P.Cast()
        self.dtype = P.DType()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.sub = P.Sub()
        self.neg = P.Neg()
        self.add_flags(has_effect=True)

    def construct(self, x):
        init = self.alloc_status()
        self.clear_status(init)
        res = self.sub(x, self.neg(x))
        self.get_status(init)
        flag_sum = self.reduce_sum(init, (0,))
        base = self.cast(self.fill(self.dtype(res), self.shape_op(res), 0.0), self.dtype(flag_sum))
        cond = self.less(base, flag_sum)
        out = self.select(cond, self.cast(base, self.dtype(res)), res)
        return out


class DiagNet(nn.Cell):
    """ DiagNet definition """

    def __init__(self):
        super(DiagNet, self).__init__()
        self.fill = P.Fill()
        self.diag = P.Diag()

    def construct(self, x):
        return x - self.diag(self.fill(mstype.float32, (3,), 1.0))


class NetWithLossCumSum(nn.Cell):
    """ NetWithLossCumSum definition """

    def __init__(self, network):
        super(NetWithLossCumSum, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, input):
        predict = self.network(input)
        return self.loss(predict)


class GradWrapCumSum(nn.Cell):
    """ GradWrapCumSum definition """

    def __init__(self, network):
        super(GradWrapCumSum, self).__init__()
        self.network = network

    def construct(self, input):
        return C.grad(self.network)(input)


class NetCumSum(nn.Cell):
    """ NetCumSum definition """

    def __init__(self):
        super(NetCumSum, self).__init__()
        self.cumsum = P.CumSum()
        self.axis = 1

    def construct(self, input):
        return self.cumsum(input, self.axis)


class SignNet(nn.Cell):
    """ SignNet definition """

    def __init__(self):
        super(SignNet, self).__init__()
        self.sign = P.Sign()

    def construct(self, x):
        return self.sign(x)


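# Each entry in test_case_math_ops is a (name, config) pair consumed by the
# mindspore_test pipelines: 'block' is the cell under test, 'desc_inputs' the
# forward inputs, 'desc_bprop' the sense tensors for the backward pass, and
# 'skip' the phases to omit (every case here skips 'backward').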
test_case_math_ops = [
    ('MatMulGrad', {
        'block': GradWrap(NetWithLoss(MatMulNet())),
        'desc_inputs': [Tensor(np.ones([3, 3]).astype(np.int32)),
                        Tensor(np.ones([3, 3]).astype(np.int32)),
                        Tensor(np.ones([3]).astype(np.int32))],
        'desc_bprop': [Tensor(np.ones([3, 3]).astype(np.int32)),
                       Tensor(np.ones([3, 3]).astype(np.int32)),
                       Tensor(np.ones([3]).astype(np.int32))],
        'skip': ['backward']}),
    ('CumSumGrad', {
        'block': GradWrapCumSum(NetWithLossCumSum(NetCumSum())),
        'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))],
        'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))],
        'skip': ['backward']}),
    ('Diag', {
        'block': DiagNet(),
        'desc_inputs': [Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], np.float32))],
        'skip': ['backward']}),
    ('SubBroadcast', {
        'block': GradWrapSub(NetWithLossSub(SubNet())),
        'desc_inputs': [Tensor(np.ones([5, 3])), Tensor(np.ones([8, 5, 3]))],
        'desc_bprop': [Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], np.float32))],
        'skip': ['backward']}),
    ('NpuFloat_NotOverflow', {
        'block': NpuFloatNet(),
        'desc_inputs': [Tensor(np.full((8, 5, 3, 1), 655, dtype=np.float16), dtype=ms.float16)],
        'desc_bprop': [Tensor(np.full((8, 5, 3, 1), 655, dtype=np.float16), dtype=ms.float16)],
        'skip': ['backward']}),
    ('NpuFloat_Overflow', {
        'block': NpuFloatNet(),
        'desc_inputs': [Tensor(np.full((8, 5, 3, 1), 65504, dtype=np.float16), dtype=ms.float16)],
        'desc_bprop': [Tensor(np.full((8, 5, 3, 1), 65504, dtype=np.float16), dtype=ms.float16)],
        'skip': ['backward']}),
    ('Sign', {
        'block': SignNet(),
        'desc_inputs': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'desc_bprop': [Tensor(np.array([[1., 0., -2.]], np.float32))],
        'skip': ['backward']}),
]


test_case_lists = [test_case_math_ops]
test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists)

# use -k to select certain testcase
# pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm


@non_graph_engine
@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config)
def test_exec():
    context.set_context(mode=context.GRAPH_MODE)
    return test_exec_case


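# raise_set drives the exception-verification pipeline: each 'block' pairs a
# callable that builds an operator with an invalid attribute against the
# exception type the construction is expected to raise.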
raise_set = [
    ('StridedSlice_1_Error', {
        'block': (lambda x: P.StridedSlice(begin_mask="1"), {'exception': ValueError}),
        'desc_inputs': [0]}),
    ('StridedSlice_2_Error', {
        'block': (lambda x: P.StridedSlice(end_mask="1"), {'exception': ValueError}),
        'desc_inputs': [0]}),
    ('StridedSlice_3_Error', {
        'block': (lambda x: P.StridedSlice(ellipsis_mask=1.1), {'exception': ValueError}),
        'desc_inputs': [0]}),
    ('StridedSlice_4_Error', {
        'block': (lambda x: P.StridedSlice(new_axis_mask="1.1"), {'exception': ValueError}),
        'desc_inputs': [0]}),
]


@mindspore_test(pipeline_for_verify_exception_for_case_by_case_config)
def test_check_exception():
    return raise_set