You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_maxpool_gpu_op.py 10 kB

6 years ago
6 years ago
6 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. from functools import reduce
  16. import numpy as np
  17. import pytest
  18. import mindspore.context as context
  19. import mindspore.nn as nn
  20. import mindspore.ops.operations as P
  21. from mindspore import Tensor
  22. class Net_Pool(nn.Cell):
  23. def __init__(self):
  24. super(Net_Pool, self).__init__()
  25. self.maxpool_fun = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="VALID")
  26. def construct(self, x):
  27. return self.maxpool_fun(x)
  28. class Net_Pool2(nn.Cell):
  29. def __init__(self):
  30. super(Net_Pool2, self).__init__()
  31. self.maxpool_fun = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="SAME")
  32. def construct(self, x):
  33. return self.maxpool_fun(x)
  34. @pytest.mark.level0
  35. @pytest.mark.platform_x86_gpu_training
  36. @pytest.mark.env_onecard
  37. def test_maxpool2d():
  38. x = Tensor(np.array([[[
  39. [0, 1, 2, 3, -4, -5],
  40. [6, 7, 8, 9, -10, -11],
  41. [12, 13, 14, -15, -16, -17],
  42. [18, 19, 20, 21, 22, 23],
  43. [24, 25, 26, 27, 28, 29],
  44. [30, 31, 32, 33, 34, 35]
  45. ]]]).astype(np.float32))
  46. expect_result = (np.array([[[
  47. [7, 9, -4],
  48. [19, 21, 23],
  49. [31, 33, 35]
  50. ]]]))
  51. expect_result2 = (np.array([[[
  52. [14, 14, -4],
  53. [26, 28, 29],
  54. [32, 34, 35]
  55. ]]]))
  56. context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
  57. maxpool2d = Net_Pool()
  58. maxpool2d2 = Net_Pool2()
  59. output2 = maxpool2d2(x)
  60. output = maxpool2d(x)
  61. assert (output.asnumpy() == expect_result).all()
  62. assert (output2.asnumpy() == expect_result2).all()
  63. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  64. maxpool2d = Net_Pool()
  65. maxpool2d2 = Net_Pool2()
  66. output2 = maxpool2d2(x)
  67. output = maxpool2d(x)
  68. assert (output.asnumpy() == expect_result).all()
  69. assert (output2.asnumpy() == expect_result2).all()
  70. @pytest.mark.level0
  71. @pytest.mark.platform_x86_gpu_training
  72. @pytest.mark.env_onecard
  73. def test_max_pool3d_1():
  74. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  75. x_shape = (2, 3, 2, 3, 4)
  76. kernel_size = (2, 2, 3)
  77. strides = 1
  78. pad_mode = 'VALID'
  79. x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
  80. x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
  81. output_ms = P.MaxPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
  82. expert_result = (np.array([[[[[18, 19],
  83. [22, 23]]],
  84. [[[42, 43],
  85. [46, 47]]],
  86. [[[66, 67],
  87. [70, 71]]]],
  88. [[[[90, 91],
  89. [94, 95]]],
  90. [[[114, 115],
  91. [118, 119]]],
  92. [[[138, 139],
  93. [142, 143]]]]]))
  94. assert (output_ms.asnumpy() == expert_result).all()
  95. @pytest.mark.level0
  96. @pytest.mark.platform_x86_gpu_training
  97. @pytest.mark.env_onecard
  98. def test_max_pool3d_2():
  99. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  100. x_shape = (2, 3, 2, 3, 4)
  101. kernel_size = 2
  102. strides = 1
  103. pad_mode = 'VALID'
  104. x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
  105. x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
  106. output_ms = P.MaxPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
  107. expert_result = (np.array([[[[[17, 18, 19],
  108. [21, 22, 23]]],
  109. [[[41, 42, 43],
  110. [45, 46, 47]]],
  111. [[[65, 66, 67],
  112. [69, 70, 71]]]],
  113. [[[[89, 90, 91],
  114. [93, 94, 95]]],
  115. [[[113, 114, 115],
  116. [117, 118, 119]]],
  117. [[[137, 138, 139],
  118. [141, 142, 143]]]]]))
  119. assert (output_ms.asnumpy() == expert_result).all()
  120. @pytest.mark.level0
  121. @pytest.mark.platform_x86_gpu_training
  122. @pytest.mark.env_onecard
  123. def test_max_pool3d_3():
  124. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  125. x_shape = (2, 3, 2, 3, 4)
  126. kernel_size = 2
  127. strides = 3
  128. pad_mode = 'VALID'
  129. x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
  130. x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
  131. output_ms = P.MaxPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
  132. expert_result = (np.array([[[[[17]]],
  133. [[[41]]],
  134. [[[65]]]],
  135. [[[[89]]],
  136. [[[113]]],
  137. [[[137]]]]]))
  138. assert (output_ms.asnumpy() == expert_result).all()
  139. @pytest.mark.level0
  140. @pytest.mark.platform_x86_gpu_training
  141. @pytest.mark.env_onecard
  142. def test_max_pool3d_4():
  143. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  144. x_shape = (2, 3, 2, 3, 4)
  145. kernel_size = (2, 2, 3)
  146. strides = 1
  147. pad_mode = 'SAME'
  148. x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
  149. x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
  150. output_ms = P.MaxPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
  151. expert_result = (np.array([[[[[17, 18, 19, 19],
  152. [21, 22, 23, 23],
  153. [21, 22, 23, 23]],
  154. [[17, 18, 19, 19],
  155. [21, 22, 23, 23],
  156. [21, 22, 23, 23]]],
  157. [[[41, 42, 43, 43],
  158. [45, 46, 47, 47],
  159. [45, 46, 47, 47]],
  160. [[41, 42, 43, 43],
  161. [45, 46, 47, 47],
  162. [45, 46, 47, 47]]],
  163. [[[65, 66, 67, 67],
  164. [69, 70, 71, 71],
  165. [69, 70, 71, 71]],
  166. [[65, 66, 67, 67],
  167. [69, 70, 71, 71],
  168. [69, 70, 71, 71]]]],
  169. [[[[89, 90, 91, 91],
  170. [93, 94, 95, 95],
  171. [93, 94, 95, 95]],
  172. [[89, 90, 91, 91],
  173. [93, 94, 95, 95],
  174. [93, 94, 95, 95]]],
  175. [[[113, 114, 115, 115],
  176. [117, 118, 119, 119],
  177. [117, 118, 119, 119]],
  178. [[113, 114, 115, 115],
  179. [117, 118, 119, 119],
  180. [117, 118, 119, 119]]],
  181. [[[137, 138, 139, 139],
  182. [141, 142, 143, 143],
  183. [141, 142, 143, 143]],
  184. [[137, 138, 139, 139],
  185. [141, 142, 143, 143],
  186. [141, 142, 143, 143]]]]]))
  187. assert (output_ms.asnumpy() == expert_result).all()
  188. @pytest.mark.level0
  189. @pytest.mark.platform_x86_gpu_training
  190. @pytest.mark.env_onecard
  191. def test_max_pool3d_5():
  192. context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
  193. x_shape = (2, 3, 2, 3, 4)
  194. kernel_size = (2, 2, 3)
  195. strides = 1
  196. pad_mode = 'SAME'
  197. x_val = np.arange(reduce(lambda x, y: x * y, x_shape))
  198. x_ms = Tensor(x_val).reshape(x_shape).astype(np.float32)
  199. output_ms = P.MaxPool3D(kernel_size=kernel_size, strides=strides, pad_mode=pad_mode)(x_ms)
  200. expert_result = (np.array([[[[[17, 18, 19, 19],
  201. [21, 22, 23, 23],
  202. [21, 22, 23, 23]],
  203. [[17, 18, 19, 19],
  204. [21, 22, 23, 23],
  205. [21, 22, 23, 23]]],
  206. [[[41, 42, 43, 43],
  207. [45, 46, 47, 47],
  208. [45, 46, 47, 47]],
  209. [[41, 42, 43, 43],
  210. [45, 46, 47, 47],
  211. [45, 46, 47, 47]]],
  212. [[[65, 66, 67, 67],
  213. [69, 70, 71, 71],
  214. [69, 70, 71, 71]],
  215. [[65, 66, 67, 67],
  216. [69, 70, 71, 71],
  217. [69, 70, 71, 71]]]],
  218. [[[[89, 90, 91, 91],
  219. [93, 94, 95, 95],
  220. [93, 94, 95, 95]],
  221. [[89, 90, 91, 91],
  222. [93, 94, 95, 95],
  223. [93, 94, 95, 95]]],
  224. [[[113, 114, 115, 115],
  225. [117, 118, 119, 119],
  226. [117, 118, 119, 119]],
  227. [[113, 114, 115, 115],
  228. [117, 118, 119, 119],
  229. [117, 118, 119, 119]]],
  230. [[[137, 138, 139, 139],
  231. [141, 142, 143, 143],
  232. [141, 142, 143, 143]],
  233. [[137, 138, 139, 139],
  234. [141, 142, 143, 143],
  235. [141, 142, 143, 143]]]]]))
  236. assert (output_ms.asnumpy() == expert_result).all()