
test_sub_op.py

# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.sub = P.Sub()

    def construct(self, x, y):
        return self.sub(x, y)


class NetDynamic(nn.Cell):
    def __init__(self):
        super(NetDynamic, self).__init__()
        self.d = inner.GpuConvertToDynamicShape()
        self.sub = P.Sub()

    def construct(self, x, y):
        x = self.d(x)
        y = self.d(y)
        out = self.sub(x, y)
        return out
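

# `Net` exercises the static-shape Sub GPU kernel directly, while `NetDynamic`
# first routes both inputs through the inner GpuConvertToDynamicShape op so the
# dynamic-shape code path of P.Sub is compiled and checked as well (see the
# "dynamic shape" block at the end of `sub` below).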
def sub(nptype):
    # Inputs cover same-shape, broadcast (on the C/H/W axes), 1-element and
    # 0-d (scalar tensor) cases for the given dtype.
    np_x0 = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(nptype)
    np_y0 = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(nptype)
    np_x1 = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(nptype)
    np_y1 = np.random.uniform(-2, 2, (2, 1, 4, 4)).astype(nptype)
    np_x2 = np.random.uniform(-2, 2, (2, 1, 1, 4)).astype(nptype)
    np_y2 = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(nptype)
    np_x3 = np.random.uniform(-2, 2, 1).astype(nptype)
    np_y3 = np.random.uniform(-2, 2, 1).astype(nptype)
    np_x4 = np.array(768).astype(nptype)
    np_y4 = np.array(3072.5).astype(nptype)

    x0 = Tensor(np_x0)
    y0 = Tensor(np_y0)
    x1 = Tensor(np_x1)
    y1 = Tensor(np_y1)
    x2 = Tensor(np_x2)
    y2 = Tensor(np_y2)
    x3 = Tensor(np_x3)
    y3 = Tensor(np_y3)
    x4 = Tensor(np_x4)
    y4 = Tensor(np_y4)

    expect0 = np.subtract(np_x0, np_y0)
    error0 = np.ones(shape=expect0.shape) * 1.0e-5
    expect1 = np.subtract(np_x1, np_y1)
    error1 = np.ones(shape=expect1.shape) * 1.0e-5
    expect2 = np.subtract(np_x2, np_y2)
    error2 = np.ones(shape=expect2.shape) * 1.0e-5
    expect3 = np.subtract(np_x3, np_y3)
    error3 = np.ones(shape=expect3.shape) * 1.0e-5
    expect4 = np.subtract(np_x4, np_y4)
    error4 = np.ones(shape=expect4.shape) * 1.0e-5

    # PyNative mode
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    sub_net = Net()
    output0 = sub_net(x0, y0)
    output1 = sub_net(x1, y1)
    output2 = sub_net(x2, y2)
    output3 = sub_net(x3, y3)
    output4 = sub_net(x4, y4)
    diff0 = output0.asnumpy() - expect0
    assert np.all(diff0 < error0)
    assert output0.shape == expect0.shape
    diff1 = output1.asnumpy() - expect1
    assert np.all(diff1 < error1)
    assert output1.shape == expect1.shape
    diff2 = output2.asnumpy() - expect2
    assert np.all(diff2 < error2)
    assert output2.shape == expect2.shape
    diff3 = output3.asnumpy() - expect3
    assert np.all(diff3 < error3)
    assert output3.shape == expect3.shape
    diff4 = output4.asnumpy() - expect4
    assert np.all(diff4 < error4)
    assert output4.shape == expect4.shape

    # Graph mode
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    sub_net = Net()
    output0 = sub_net(x0, y0)
    output1 = sub_net(x1, y1)
    output2 = sub_net(x2, y2)
    output3 = sub_net(x3, y3)
    output4 = sub_net(x4, y4)
    diff0 = output0.asnumpy() - expect0
    assert np.all(diff0 < error0)
    assert output0.shape == expect0.shape
    diff1 = output1.asnumpy() - expect1
    assert np.all(diff1 < error1)
    assert output1.shape == expect1.shape
    diff2 = output2.asnumpy() - expect2
    assert np.all(diff2 < error2)
    assert output2.shape == expect2.shape
    diff3 = output3.asnumpy() - expect3
    assert np.all(diff3 < error3)
    assert output3.shape == expect3.shape
    diff4 = output4.asnumpy() - expect4
    assert np.all(diff4 < error4)
    assert output4.shape == expect4.shape

    # dynamic shape
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    d_sub_net = NetDynamic()
    output3 = d_sub_net(x3, y3)
    output0 = d_sub_net(x0, y0)
    diff3 = output3.asnumpy() - expect3
    assert np.all(diff3 < error3)
    assert output3.shape == expect3.shape
    diff0 = output0.asnumpy() - expect0
    assert np.all(diff0 < error0)
    assert output0.shape == expect0.shape
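

# level0 / platform_x86_gpu_training / env_onecard are the custom pytest markers
# MindSpore's CI uses to schedule these cases as single-card GPU tests; each case
# below runs the shared `sub` helper with a different input dtype.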
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_sub_float64():
    sub(np.float64)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_sub_float32():
    sub(np.float32)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_sub_float16():
    sub(np.float16)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_sub_int64():
    sub(np.int64)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_sub_int32():
    sub(np.int32)
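
For context, here is a minimal standalone sketch of the broadcasting behaviour the test relies on. It is not part of the test file above; it assumes a MindSpore install with GPU support (swap device_target to "CPU" on a CPU-only build), and the shapes are just borrowed from the cases in `sub`.

import numpy as np
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P

# PyNative mode executes the primitive eagerly.
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

x = Tensor(np.random.uniform(-2, 2, (2, 1, 1, 4)).astype(np.float32))
y = Tensor(np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32))

out = P.Sub()(x, y)  # (2, 1, 1, 4) broadcast against (2, 3, 4, 4) -> (2, 3, 4, 4)
assert out.shape == (2, 3, 4, 4)
assert np.allclose(out.asnumpy(), x.asnumpy() - y.asnumpy(), atol=1e-5)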