# test_mul_op.py
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner


class NetMul(nn.Cell):
    """Minimal network that wraps the P.Mul operator."""

    def __init__(self):
        super(NetMul, self).__init__()
        self.mul = P.Mul()

    def construct(self, x, y):
        return self.mul(x, y)
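
# Usage sketch (not part of the test suite; assumes a GPU-enabled MindSpore build):
#   context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
#   out = NetMul()(Tensor(np.ones((2, 2), np.float32)),
#                  Tensor(np.full((2, 2), 3.0, np.float32)))
#   # out.asnumpy() is a 2x2 array filled with 3.0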


def mul(nptype):
    """Compare P.Mul against np.multiply for several input shapes, first in
    PyNative mode and then in Graph mode on GPU."""
    # Input pairs: same shape, broadcast on one axis, broadcast on two axes,
    # one-element arrays, and 0-d scalars.
    x0_np = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(nptype)
    y0_np = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(nptype)
    x1_np = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(nptype)
    y1_np = np.random.uniform(-2, 2, (2, 1, 4, 4)).astype(nptype)
    x2_np = np.random.uniform(-2, 2, (2, 1, 1, 4)).astype(nptype)
    y2_np = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(nptype)
    x3_np = np.random.uniform(-2, 2, 1).astype(nptype)
    y3_np = np.random.uniform(-2, 2, 1).astype(nptype)
    x4_np = np.array(78).astype(nptype)
    y4_np = np.array(37.5).astype(nptype)

    x0 = Tensor(x0_np)
    y0 = Tensor(y0_np)
    x1 = Tensor(x1_np)
    y1 = Tensor(y1_np)
    x2 = Tensor(x2_np)
    y2 = Tensor(y2_np)
    x3 = Tensor(x3_np)
    y3 = Tensor(y3_np)
    x4 = Tensor(x4_np)
    y4 = Tensor(y4_np)

    # PyNative mode.
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    mul_net = NetMul()
    output0 = mul_net(x0, y0)
    expect0 = np.multiply(x0_np, y0_np)
    diff0 = output0.asnumpy() - expect0
    error0 = np.ones(shape=expect0.shape) * 1.0e-5
    assert np.all(diff0 < error0)
    assert output0.shape == expect0.shape
    output1 = mul_net(x1, y1)
    expect1 = np.multiply(x1_np, y1_np)
    diff1 = output1.asnumpy() - expect1
    error1 = np.ones(shape=expect1.shape) * 1.0e-5
    assert np.all(diff1 < error1)
    assert output1.shape == expect1.shape
    output2 = mul_net(x2, y2)
    expect2 = np.multiply(x2_np, y2_np)
    diff2 = output2.asnumpy() - expect2
    error2 = np.ones(shape=expect2.shape) * 1.0e-5
    assert np.all(diff2 < error2)
    assert output2.shape == expect2.shape
    output3 = mul_net(x3, y3)
    expect3 = np.multiply(x3_np, y3_np)
    diff3 = output3.asnumpy() - expect3
    error3 = np.ones(shape=expect3.shape) * 1.0e-5
    assert np.all(diff3 < error3)
    assert output3.shape == expect3.shape
    output4 = mul_net(x4, y4)
    expect4 = np.multiply(x4_np, y4_np)
    diff4 = output4.asnumpy() - expect4
    error4 = np.ones(shape=expect4.shape) * 1.0e-5
    assert np.all(diff4 < error4)
    assert output4.shape == expect4.shape

    # Graph mode: repeat the same checks.
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    mul_net = NetMul()
    output0 = mul_net(x0, y0)
    expect0 = np.multiply(x0_np, y0_np)
    diff0 = output0.asnumpy() - expect0
    error0 = np.ones(shape=expect0.shape) * 1.0e-5
    assert np.all(diff0 < error0)
    assert output0.shape == expect0.shape
    output1 = mul_net(x1, y1)
    expect1 = np.multiply(x1_np, y1_np)
    diff1 = output1.asnumpy() - expect1
    error1 = np.ones(shape=expect1.shape) * 1.0e-5
    assert np.all(diff1 < error1)
    assert output1.shape == expect1.shape
    output2 = mul_net(x2, y2)
    expect2 = np.multiply(x2_np, y2_np)
    diff2 = output2.asnumpy() - expect2
    error2 = np.ones(shape=expect2.shape) * 1.0e-5
    assert np.all(diff2 < error2)
    assert output2.shape == expect2.shape
    output3 = mul_net(x3, y3)
    expect3 = np.multiply(x3_np, y3_np)
    diff3 = output3.asnumpy() - expect3
    error3 = np.ones(shape=expect3.shape) * 1.0e-5
    assert np.all(diff3 < error3)
    assert output3.shape == expect3.shape
    output4 = mul_net(x4, y4)
    expect4 = np.multiply(x4_np, y4_np)
    diff4 = output4.asnumpy() - expect4
    error4 = np.ones(shape=expect4.shape) * 1.0e-5
    assert np.all(diff4 < error4)
    assert output4.shape == expect4.shape


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_float64():
    mul(np.float64)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_float32():
    mul(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_float16():
    mul(np.float16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_int64():
    mul(np.int64)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_int32():
    mul(np.int32)
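
# The per-dtype wrappers above could equivalently be written with
# pytest.mark.parametrize (hedged alternative, not how this suite is organized):
#   @pytest.mark.parametrize("nptype", [np.float64, np.float32, np.float16, np.int64, np.int32])
#   def test_mul_all_dtypes(nptype):
#       mul(nptype)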


class NetMul_dynamic(nn.Cell):
    """NetMul variant that routes its inputs through the test-only inner op
    GpuConvertToDynamicShape so that Mul runs on the dynamic-shape path."""

    def __init__(self):
        super(NetMul_dynamic, self).__init__()
        self.mul = P.Mul()
        self.test_dynamic = inner.GpuConvertToDynamicShape()

    def construct(self, x, y):
        x = self.test_dynamic(x)
        y = self.test_dynamic(y)
        out = self.mul(x, y)
        return out


def mul_dynamic(nptype):
    """Compare P.Mul against np.multiply when the inputs are converted to
    dynamic shape, in Graph mode on GPU."""
    x1_np = np.array([78]).astype(nptype)
    y1_np = np.array([37.5]).astype(nptype)
    x2_np = np.random.uniform(-2, 2, (2, 1, 1, 4)).astype(nptype)
    y2_np = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(nptype)

    x1 = Tensor(x1_np)
    y1 = Tensor(y1_np)
    x2 = Tensor(x2_np)
    y2 = Tensor(y2_np)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    mul_net = NetMul_dynamic()
    output1 = mul_net(x1, y1)
    output2 = mul_net(x2, y2)
    expect1 = np.multiply(x1_np, y1_np)
    expect2 = np.multiply(x2_np, y2_np)
    diff1 = output1.asnumpy() - expect1
    diff2 = output2.asnumpy() - expect2
    error1 = np.ones(shape=expect1.shape) * 1.0e-5
    assert np.all(diff1 < error1)
    assert output1.shape == expect1.shape
    error2 = np.ones(shape=expect2.shape) * 1.0e-5
    assert np.all(diff2 < error2)
    assert output2.shape == expect2.shape


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_dynamic_float64():
    mul_dynamic(np.float64)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_dynamic_float32():
    mul_dynamic(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_dynamic_float16():
    mul_dynamic(np.float16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_dynamic_int64():
    mul_dynamic(np.int64)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mul_dynamic_int32():
    mul_dynamic(np.int32)
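
# The level0 / platform_x86_gpu_training / env_onecard marks above appear to be
# CI gating marks. A minimal local run (assuming pytest and a GPU-enabled
# MindSpore build are installed) might look like:
#   pytest -v -m level0 test_mul_op.py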