
test_conv2d_op.py

# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
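

# NetConv2d wraps the raw P.Conv2D primitive (2 output channels, 1x1 kernel,
# valid padding, stride 1) so it can be run as a small Cell in the tests below.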
class NetConv2d(nn.Cell):
    def __init__(self):
        super(NetConv2d, self).__init__()
        out_channel = 2
        kernel_size = 1
        self.conv = P.Conv2D(out_channel,
                             kernel_size,
                             mode=1,
                             pad_mode="valid",
                             pad=0,
                             stride=1,
                             dilation=1,
                             group=1)

    def construct(self, x, w):
        return self.conv(x, w)
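

# test_conv2d checks the NetConv2d forward result against a precomputed
# expectation, once in PyNative mode and once in graph mode on GPU.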
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_conv2d():
    x = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32))
    w = Tensor(np.arange(2 * 3 * 1 * 1).reshape(2, 3, 1, 1).astype(np.float32))
    expect = np.array([[[[45, 48, 51],
                         [54, 57, 60],
                         [63, 66, 69]],
                        [[126, 138, 150],
                         [162, 174, 186],
                         [198, 210, 222]]]]).astype(np.float32)
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU", max_device_memory="0.2GB")
    conv2d = NetConv2d()
    output = conv2d(x, w)
    assert (output.asnumpy() == expect).all()

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    conv2d = NetConv2d()
    output = conv2d(x, w)
    assert (output.asnumpy() == expect).all()
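

# NetConv uses the higher-level nn.Conv2d layer with a fixed weight_init and a
# Parameter-backed input, so construct() takes no arguments.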
class NetConv(nn.Cell):
    def __init__(self, weight, x):
        super(NetConv, self).__init__()
        self.conv = nn.Conv2d(in_channels=3,
                              out_channels=3,
                              kernel_size=(5, 3),
                              stride=2,
                              pad_mode='same',
                              padding=(0, 0, 0, 0),
                              dilation=(1, 1),
                              group=1,
                              has_bias=False,
                              weight_init=Tensor(weight))
        self.x = Parameter(initializer(Tensor(x), [1, 3, 4, 2]), name="x")

    def construct(self):
        return self.conv(self.x)
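

# test_conv runs NetConv in graph mode on GPU and compares the output against
# precomputed values within an absolute tolerance of 1e-4.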
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_conv():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    weight = np.array([[[[0.38968208, 0.14398979, 0.7962463],
                         [-2.1836321, -0.63823014, -0.50588065],
                         [0.6660469, 0.64673275, -0.13160042],
                         [1.3683757, 1.4005762, -0.37235805],
                         [-0.22638111, 0.45427424, -0.10293389]],
                        [[1.4985064, -0.29318333, -0.92694616],
                         [1.539068, 0.8937254, -1.2598171],
                         [0.9658142, -0.63945454, -0.23185322],
                         [1.363089, -0.41694695, -2.2750475],
                         [-0.4865508, -1.6938025, 0.609849]],
                        [[1.1844803, 0.99874926, -1.9475793],
                         [0.4987858, 0.5307887, -0.04226681],
                         [0.4529779, -1.1960793, 0.9456575],
                         [3.133675, 0.2309789, -0.29201075],
                         [-0.59632736, -0.0789804, -0.69486314]]],
                       [[[-0.5606142, 0.6420862, 0.2478745],
                         [0.02717604, 1.5483379, -0.9373383],
                         [-1.1017276, -0.259478, 1.0311872],
                         [1.8387799, 0.16468556, 0.33392152],
                         [-1.8781787, 1.0158662, 1.6527579]],
                        [[0.45696944, -0.5652523, -1.5618048],
                         [-0.30304828, 0.1331878, -0.36955845],
                         [0.91655576, 0.66612357, 0.3068175],
                         [-0.45732066, 0.8923335, 1.0542952],
                         [-0.73519516, 1.0518405, -1.0273266]],
                        [[-0.79712886, -0.26814285, 0.12779616],
                         [1.0367643, -1.6180774, 0.42999932],
                         [-0.81818223, -0.81502074, 0.882194],
                         [0.53640485, 0.4178927, 1.6037121],
                         [0.9256354, -1.1006796, 0.16614541]]],
                       [[[-1.5216796, -1.2473261, 0.6549515],
                         [0.63627815, 0.7221449, 0.02977821],
                         [-0.61331123, -0.49451825, 0.33852202],
                         [1.4510741, -1.3818305, -0.791747],
                         [0.6989747, 0.49558765, 1.0813237]],
                        [[-0.03969796, 0.71586496, 0.8326594],
                         [-0.15443641, 1.0389746, -0.59301984],
                         [0.7197836, 0.03257621, 1.8398637],
                         [0.6111736, -0.16166899, -2.4869773],
                         [1.3066711, -1.8003578, 0.17412892]],
                        [[-0.31470737, -0.5938182, -1.1311078],
                         [-0.99081016, 0.4005125, 0.44154453],
                         [1.0876914, -2.5958562, -0.5914863],
                         [1.3759689, -0.7741513, 0.19928917],
                         [1.6792973, 2.2744863, -0.04308867]]]]).astype(np.float32)
    x = np.array([[[[-1.4311737, 1.015344],
                    [0.04431088, -2.2886624],
                    [1.4832113, 1.240908],
                    [0.67040104, 0.15266363]],
                   [[0.44226435, 1.1461105],
                    [1.194218, 1.5547837],
                    [0.23152256, 1.5911953],
                    [0.11206784, 0.17978816]],
                   [[-0.57803905, 0.8039611],
                    [0.0823025, -0.6134477],
                    [-1.4171146, 1.6269946],
                    [0.48878875, 0.9117505]]]]).astype(np.float32)
    conv2d = NetConv(weight, x)
    output = conv2d()
    expected = np.array([[[[2.3498724],
                           [-1.9199573]],
                          [[5.376562],
                           [-5.425745]],
                          [[5.9105043],
                           [7.469034]]]]).astype(np.float32)
    loss = np.abs(expected - output.asnumpy())
    error = 1e-4 * np.ones(loss.shape)
    assert (loss < error).all()
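

# NetConv2dDynamic converts its inputs with the inner GpuConvertToDynamicShape
# op before running the same P.Conv2D primitive, exercising the dynamic-shape
# path of the GPU convolution kernel.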
class NetConv2dDynamic(nn.Cell):
    def __init__(self, axis=0, out_nums=1):
        super(NetConv2dDynamic, self).__init__()
        self.dynshape = inner.GpuConvertToDynamicShape()
        out_channel = 2
        kernel_size = 1
        self.conv = P.Conv2D(out_channel,
                             kernel_size,
                             mode=1,
                             pad_mode="valid",
                             pad=0,
                             stride=1,
                             dilation=1,
                             group=1)

    def construct(self, x, w):
        x_dyn = self.dynshape(x)
        w_dyn = self.dynshape(w)
        x_conv = self.conv(x_dyn, w_dyn)
        return x_conv
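

# test_conv2d_dynamic feeds two input/weight pairs with different shapes
# through the same NetConv2dDynamic instance and checks both results, so the
# dynamic-shape kernel is validated against more than one shape.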
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_conv2d_dynamic():
    x1 = Tensor(np.arange(1 * 3 * 3 * 3).reshape(1, 3, 3, 3).astype(np.float32))
    w1 = Tensor(np.arange(2 * 3 * 1 * 1).reshape(2, 3, 1, 1).astype(np.float32))
    expect1 = np.array([[[[45, 48, 51],
                          [54, 57, 60],
                          [63, 66, 69]],
                         [[126, 138, 150],
                          [162, 174, 186],
                          [198, 210, 222]]]]).astype(np.float32)

    x2 = Tensor(np.arange(5 * 1 * 2 * 2).reshape(5, 1, 2, 2).astype(np.float32))
    w2 = Tensor(np.arange(2 * 1 * 1 * 1).reshape(2, 1, 1, 1).astype(np.float32))
    expect2 = np.array([[[[0., 0.],
                          [0., 0.]],
                         [[0., 1.],
                          [2., 3.]]],
                        [[[0., 0.],
                          [0., 0.]],
                         [[4., 5.],
                          [6., 7.]]],
                        [[[0., 0.],
                          [0., 0.]],
                         [[8., 9.],
                          [10., 11.]]],
                        [[[0., 0.],
                          [0., 0.]],
                         [[12., 13.],
                          [14., 15.]]],
                        [[[0., 0.],
                          [0., 0.]],
                         [[16., 17.],
                          [18., 19.]]]]).astype(np.float32)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    conv2d = NetConv2dDynamic()
    output1 = conv2d(x1, w1)
    assert (output1.asnumpy() == expect1).all()
    output2 = conv2d(x2, w2)
    assert (output2.asnumpy() == expect2).all()