You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

test_squared_difference_op.py 11 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import numpy as np
  16. import pytest
  17. import mindspore.context as context
  18. import mindspore.nn as nn
  19. from mindspore import Tensor
  20. from mindspore.ops import operations as P
  21. class SquaredDifference(nn.Cell):
  22. def __init__(self):
  23. super(SquaredDifference, self).__init__()
  24. self.squaredDiff = P.SquaredDifference()
  25. def construct(self, x, y):
  26. return self.squaredDiff(x, y)
  27. @pytest.mark.level0
  28. @pytest.mark.platform_x86_gpu_training
  29. @pytest.mark.env_onecard
  30. def test_nobroadcast_f16():
  31. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  32. np.random.seed(42)
  33. net = SquaredDifference()
  34. input_x = np.random.uniform(10, 20, (3, 4, 5, 2)).astype(np.float16)
  35. input_y = np.random.uniform(40, 50, (3, 4, 5, 2)).astype(np.float16)
  36. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  37. diff = input_x-input_y
  38. expect = diff*diff
  39. assert np.all(output == expect)
  40. @pytest.mark.level0
  41. @pytest.mark.platform_x86_gpu_training
  42. @pytest.mark.env_onecard
  43. def test_nobroadcast_f32():
  44. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  45. np.random.seed(42)
  46. net = SquaredDifference()
  47. input_x = np.random.rand(3, 4, 5, 2).astype(np.float32)
  48. input_y = np.random.rand(3, 4, 5, 2).astype(np.float32)
  49. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  50. diff = input_x-input_y
  51. expect = diff*diff
  52. assert np.all(output == expect)
  53. @pytest.mark.level0
  54. @pytest.mark.platform_x86_gpu_training
  55. @pytest.mark.env_onecard
  56. def test_nobroadcast_int32():
  57. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  58. np.random.seed(42)
  59. net = SquaredDifference()
  60. input_x = np.random.rand(3, 4, 5, 2).astype(np.int32)
  61. input_y = np.random.rand(3, 4, 5, 2).astype(np.int32)
  62. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  63. diff = input_x-input_y
  64. expect = diff*diff
  65. assert np.all(output == expect)
  66. @pytest.mark.level0
  67. @pytest.mark.platform_x86_gpu_training
  68. @pytest.mark.env_onecard
  69. def test_broadcast_int32():
  70. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  71. np.random.seed(42)
  72. net = SquaredDifference()
  73. input_x = np.random.rand(1, 4, 1, 2).astype(np.int32)
  74. input_y = np.random.rand(3, 1, 5, 1).astype(np.int32)
  75. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  76. diff = input_x-input_y
  77. expect = diff*diff
  78. assert np.all(output == expect)
  79. @pytest.mark.level0
  80. @pytest.mark.platform_x86_gpu_training
  81. @pytest.mark.env_onecard
  82. def test_broadcast_f32():
  83. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  84. np.random.seed(42)
  85. net = SquaredDifference()
  86. input_x = np.random.rand(1, 4, 1, 2).astype(np.float32)
  87. input_y = np.random.rand(3, 1, 5, 1).astype(np.float32)
  88. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  89. diff = input_x-input_y
  90. expect = diff*diff
  91. assert np.all(output == expect)
  92. @pytest.mark.level0
  93. @pytest.mark.platform_x86_gpu_training
  94. @pytest.mark.env_onecard
  95. def test_broadcast_f16():
  96. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  97. np.random.seed(42)
  98. net = SquaredDifference()
  99. input_x = np.random.rand(1, 4, 1, 2).astype(np.float16)
  100. input_y = np.random.rand(3, 1, 5, 1).astype(np.float16)
  101. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  102. diff = input_x-input_y
  103. expect = diff*diff
  104. assert np.all(output == expect)
  105. @pytest.mark.level0
  106. @pytest.mark.platform_x86_gpu_training
  107. @pytest.mark.env_onecard
  108. def test_broadcast_bool():
  109. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  110. np.random.seed(42)
  111. net = SquaredDifference()
  112. input_x = np.random.rand(1, 4, 1, 2).astype(np.bool)
  113. input_y = np.random.uniform(10, 20, (3, 1, 5, 1)).astype(np.float32)
  114. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  115. diff = input_x-input_y
  116. expect = diff*diff
  117. error = np.ones(shape=np.array(output.shape, dtype=int))*1.0e-6
  118. double_check = np.abs(output-expect)/expect
  119. assert np.all(double_check < error)
  120. @pytest.mark.level0
  121. @pytest.mark.platform_x86_gpu_training
  122. @pytest.mark.env_onecard
  123. def test_nobroadcast_bool():
  124. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  125. np.random.seed(42)
  126. net = SquaredDifference()
  127. input_x = np.random.rand(3, 4, 5, 2).astype(np.bool)
  128. input_y = np.random.rand(3, 4, 5, 2).astype(np.float32)
  129. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  130. diff = input_x-input_y
  131. expect = diff*diff
  132. error = np.ones(shape=np.array(output.shape, dtype=int))*1.0e-6
  133. double_check = np.abs(output-expect)/expect
  134. assert np.all(double_check < error)
  135. @pytest.mark.level0
  136. @pytest.mark.platform_x86_gpu_training
  137. @pytest.mark.env_onecard
  138. def test_broadcast_int32_f16():
  139. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  140. np.random.seed(42)
  141. net = SquaredDifference()
  142. input_x = np.random.rand(1, 4, 1, 2).astype(np.int32)
  143. input_y = np.random.uniform(10, 20, (3, 1, 5, 1)).astype(np.float16)
  144. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  145. diff = input_x-input_y
  146. expect = diff*diff
  147. error = np.ones(shape=np.array(output.shape, dtype=int))*1.0e-3
  148. double_check = np.abs(output-expect)/expect
  149. assert np.all(double_check < error)
  150. @pytest.mark.level0
  151. @pytest.mark.platform_x86_gpu_training
  152. @pytest.mark.env_onecard
  153. def test_broadcast_int32_f32():
  154. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  155. np.random.seed(42)
  156. net = SquaredDifference()
  157. input_x = np.random.rand(1, 4, 1, 2).astype(np.int32)
  158. input_y = np.random.uniform(10, 20, (3, 1, 5, 1)).astype(np.float32)
  159. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  160. diff = input_x-input_y
  161. expect = diff*diff
  162. error = np.ones(shape=np.array(output.shape, dtype=int))*1.0e-6
  163. double_check = np.abs(output-expect)/expect
  164. assert np.all(double_check < error)
  165. @pytest.mark.level0
  166. @pytest.mark.platform_x86_gpu_training
  167. @pytest.mark.env_onecard
  168. def test_nobroadcast_int32_f16():
  169. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  170. np.random.seed(42)
  171. net = SquaredDifference()
  172. input_x = np.random.rand(2, 4, 3, 2).astype(np.int32)
  173. input_y = np.random.uniform(10, 20, (2, 4, 3, 2)).astype(np.float16)
  174. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  175. diff = input_x-input_y
  176. expect = diff*diff
  177. error = np.ones(shape=np.array(output.shape, dtype=int))*1.0e-3
  178. double_check = np.abs(output-expect)/expect
  179. assert np.all(double_check < error)
  180. @pytest.mark.level0
  181. @pytest.mark.platform_x86_gpu_training
  182. @pytest.mark.env_onecard
  183. def test_nobroadcast_int32_f32():
  184. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  185. np.random.seed(42)
  186. net = SquaredDifference()
  187. input_x = np.random.rand(2, 4, 3, 2).astype(np.int32)
  188. input_y = np.random.uniform(10, 20, (2, 4, 3, 2)).astype(np.float32)
  189. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  190. diff = input_x-input_y
  191. expect = diff*diff
  192. error = np.ones(shape=np.array(output.shape, dtype=int))*1.0e-6
  193. double_check = np.abs(output-expect)/expect
  194. assert np.all(double_check < error)
  195. @pytest.mark.level0
  196. @pytest.mark.platform_x86_gpu_training
  197. @pytest.mark.env_onecard
  198. def test_broadcast_f32_scalar_tensor():
  199. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  200. np.random.seed(42)
  201. net = SquaredDifference()
  202. input_x = np.random.rand(2).astype(np.float32)
  203. input_y = np.random.rand(3, 1, 5, 1).astype(np.float32)
  204. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  205. diff = input_x-input_y
  206. expect = diff*diff
  207. assert np.all(output == expect)
  208. @pytest.mark.level0
  209. @pytest.mark.platform_x86_gpu_training
  210. @pytest.mark.env_onecard
  211. def test_broadcast_f32_tensor_tensor():
  212. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  213. np.random.seed(42)
  214. net = SquaredDifference()
  215. input_x = np.random.rand(1, 2).astype(np.float32)
  216. input_y = np.random.rand(3, 1, 5, 1).astype(np.float32)
  217. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  218. diff = input_x-input_y
  219. expect = diff*diff
  220. assert np.all(output == expect)
  221. @pytest.mark.level0
  222. @pytest.mark.platform_x86_gpu_training
  223. @pytest.mark.env_onecard
  224. def test_broadcast_f32_tensor_tensor_dim_over_7():
  225. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  226. np.random.seed(42)
  227. net = SquaredDifference()
  228. input_x = np.random.rand(1, 2).astype(np.float32)
  229. input_y = np.random.rand(3, 1, 5, 1, 3, 4, 2, 1).astype(np.float32)
  230. try:
  231. net(Tensor(input_x), Tensor(input_y))
  232. except RuntimeError:
  233. assert True
  234. @pytest.mark.level0
  235. @pytest.mark.platform_x86_gpu_training
  236. @pytest.mark.env_onecard
  237. def test_broadcast_f32_tensor_tensor_cannot_brocast():
  238. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  239. np.random.seed(42)
  240. net = SquaredDifference()
  241. input_x = np.random.rand(5, 3).astype(np.float32)
  242. input_y = np.random.rand(3, 1, 5, 1, 3, 4, 2).astype(np.float32)
  243. try:
  244. net(Tensor(input_x), Tensor(input_y))
  245. except ValueError:
  246. assert True
  247. @pytest.mark.level0
  248. @pytest.mark.platform_x86_gpu_training
  249. @pytest.mark.env_onecard
  250. def test_broadcast_int_f32_precision():
  251. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  252. np.random.seed(42)
  253. net = SquaredDifference()
  254. input_x = np.random.randint(20, 30, (1, 2)).astype(np.int32)
  255. input_y = np.random.rand(3, 1, 5, 1).astype(np.float32)
  256. output = net(Tensor(input_x), Tensor(input_y)).asnumpy()
  257. diff = input_x-input_y
  258. expect = diff*diff
  259. error = np.ones(shape=np.array(output.shape, dtype=int))*1.0e-3
  260. double_thousand = np.abs(output-expect)/expect
  261. assert np.all(double_thousand < error)
  262. @pytest.mark.level0
  263. @pytest.mark.platform_x86_gpu_training
  264. @pytest.mark.env_onecard
  265. def test_broadcast_type_error():
  266. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  267. np.random.seed(42)
  268. net = SquaredDifference()
  269. input_x = np.random.randint(20, 30, (1, 2)).astype(np.bool)
  270. input_y = np.random.rand(3, 1, 5, 1).astype(np.bool)
  271. try:
  272. net(Tensor(input_x), Tensor(input_y))
  273. except TypeError:
  274. assert True