You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_vm.py 9.0 kB

5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """ test_vm """
  16. import numpy as np
  17. from .....vm_impl import vm
  18. def test_avg_pooling():
  19. """ test_avg_pooling """
  20. input_data = np.array([[[[-4., -3., 1., 9.],
  21. [-9., -1., 3., 4.],
  22. [1., -1., -3., -6.],
  23. [-2., -1., -2., -15.]]]]).astype(np.float32)
  24. out = vm.avg_pooling(input_data, pool_h=2, pool_w=2, stride=1)
  25. expect_out = [[[[-4.25, 0.0, 4.25],
  26. [-2.5, -0.5, -0.5],
  27. [-0.75, -1.75, -6.5]]]]
  28. assert (expect_out == out).all()
  29. def test_avg_pool_grad():
  30. """ test_avg_pool_grad """
  31. # To do
  32. input_data = np.array([[[[1., 2, 3, 4],
  33. [5, 6, 7, 8],
  34. [9, 10, 11, 12],
  35. [13, 14, 15, 16]]]]).astype(np.float32)
  36. dout = vm.avg_pooling(input_data, pool_h=2, pool_w=2, stride=1)
  37. print("vm.avg_pooling dout: ", dout)
  38. out = vm.avg_pool_grad(dout, input_data.shape, 2, 2, 1)
  39. print("vm.avg_pool_grad: ", out)
  40. assert True
  41. def test_batch_norm():
  42. """ test_batch_norm """
  43. input_data = np.random.randint(0, 255, [1, 3, 224, 224])
  44. print("input_data.shape: ", input_data.shape)
  45. print("input_data: ", input_data)
  46. output = vm.batch_norm(input_data)
  47. print("vm.batch_norm: ", output)
  48. def test_conv2d():
  49. """ test_conv2d """
  50. x = np.array([[[
  51. [3, 0, 1, 2, 7, 4],
  52. [1, 5, 8, 9, 3, 1],
  53. [2, 7, 2, 5, 1, 3],
  54. [0, 1, 3, 1, 7, 8],
  55. [4, 2, 1, 6, 2, 8],
  56. [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)
  57. weight = np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)
  58. out = vm.conv2d(x, weight)
  59. expect_out = np.array([[[
  60. [-5., -4., 0., 8.],
  61. [-10., -2., 2., 3.],
  62. [0., -2., -4., -7.],
  63. [-3., -2., -3., -16.]]]]).astype(np.float32)
  64. assert (expect_out == out).all()
  65. def test_conv2d_with_bias():
  66. """ test_conv2d_with_bias """
  67. x = np.array([[[
  68. [3, 0, 1, 2, 7, 4],
  69. [1, 5, 8, 9, 3, 1],
  70. [2, 7, 2, 5, 1, 3],
  71. [0, 1, 3, 1, 7, 8],
  72. [4, 2, 1, 6, 2, 8],
  73. [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)
  74. weight = np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)
  75. bias = np.array([1]).astype(np.float32)
  76. out = vm.conv2d(x, weight, bias)
  77. expect_out = np.array([[[
  78. [-4., -3., 1., 9.],
  79. [-9., -1., 3., 4.],
  80. [1., -1., -3., -6.],
  81. [-2., -1., -2., -15.]]]]).astype(np.float32)
  82. assert (expect_out == out).all()
  83. def test_conv2d_backprop_filter():
  84. """ test_conv2d_backprop_filter """
  85. x = np.array([[[
  86. [3, 0, 1, 2, 7, 4],
  87. [1, 5, 8, 9, 3, 1],
  88. [2, 7, 2, 5, 1, 3],
  89. [0, 1, 3, 1, 7, 8],
  90. [4, 2, 1, 6, 2, 8],
  91. [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)
  92. weight = np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)
  93. out = vm.conv2d(x, weight)
  94. backprop_filter = vm.conv2d_backprop_filter(out, x, weight.shape)
  95. print(backprop_filter)
  96. assert True
  97. def test_conv2d_backprop_input():
  98. """ test_conv2d_backprop_input """
  99. x = np.array([[[
  100. [3, 0, 1, 2, 7, 4],
  101. [1, 5, 8, 9, 3, 1],
  102. [2, 7, 2, 5, 1, 3],
  103. [0, 1, 3, 1, 7, 8],
  104. [4, 2, 1, 6, 2, 8],
  105. [2, 4, 5, 2, 3, 9]]]]).astype(np.float32)
  106. weight = np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)
  107. out = vm.conv2d(x, weight)
  108. grad = vm.conv2d_backprop_input(out, x.shape, weight)
  109. print(grad)
  110. assert True
  111. def test_flatten():
  112. """ test_flatten """
  113. x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
  114. y = vm.flatten(x)
  115. assert ([1, 2, 3, 4, 5, 6] == y.T).all()
  116. assert np.float32 == y.dtype
  117. def test_flatten2():
  118. """ test_flatten2 """
  119. x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
  120. y = vm.flatten2(x)
  121. assert ([1, 2, 3, 4, 5, 6] == y).all()
  122. assert (1, 6) == y.shape
  123. assert np.float32 == y.dtype
  124. def test_flatten_batch():
  125. """ test_flatten_batch """
  126. x = np.array([[[9, 4, 14, 1],
  127. [7, 10, 14, 13],
  128. [1, 9, 16, 7],
  129. [15, 16, 0, 4]],
  130. [[16, 13, 13, 10],
  131. [0, 12, 5, 9],
  132. [15, 0, 11, 1],
  133. [4, 16, 4, 1]],
  134. [[2, 8, 1, 13],
  135. [5, 15, 4, 11],
  136. [8, 2, 17, 16],
  137. [5, 13, 0, 2]],
  138. [[14, 8, 6, 8],
  139. [0, 8, 6, 15],
  140. [9, 1, 8, 5],
  141. [12, 6, 13, 8]],
  142. [[13, 11, 6, 3],
  143. [8, 6, 16, 5],
  144. [7, 10, 0, 8],
  145. [17, 17, 17, 3]]]).astype(np.float32)
  146. y = vm.flatten_batch(x)
  147. expect_out = np.array(
  148. [[9, 4, 14, 1, 7, 10, 14, 13, 1, 9, 16, 7, 15, 16, 0, 4],
  149. [16, 13, 13, 10, 0, 12, 5, 9, 15, 0, 11, 1, 4, 16, 4, 1],
  150. [2, 8, 1, 13, 5, 15, 4, 11, 8, 2, 17, 16, 5, 13, 0, 2],
  151. [14, 8, 6, 8, 0, 8, 6, 15, 9, 1, 8, 5, 12, 6, 13, 8],
  152. [13, 11, 6, 3, 8, 6, 16, 5, 7, 10, 0, 8, 17, 17, 17, 3]]).astype(np.float32)
  153. assert (expect_out == y).all()
  154. assert expect_out.shape == y.shape
  155. assert np.float32 == y.dtype
  156. def test_im2col():
  157. """ test_im2col """
  158. img = np.ones([1, 1, 32, 32]).astype(np.float32) * 0.01
  159. print("input img: ", img)
  160. col = vm.im2col(img, 2, 3, 1, 1)
  161. print("output col.shape : ", col.shape)
  162. print("output col: ", col)
  163. print("output col.dtype: ", col.dtype)
  164. assert np.float32 == col.dtype
  165. def test_matmul():
  166. """ test_matmul """
  167. x = np.array([1, 2, 3]).astype(np.float32)
  168. w = np.array([0, 1, 0.5]).astype(np.float32)
  169. y = vm.matmul(x, w)
  170. assert y == 3.5
  171. assert np.float32 == y.dtype
  172. def test_max_pooling():
  173. """ test_max_pooling """
  174. input_data = np.array([[[
  175. [-4., -3., 1., 9.],
  176. [-9., -1., 3., 4.],
  177. [1., -1., -3., -6.],
  178. [-2., -1., -2., -15.]]]]).astype(np.float32)
  179. out = vm.max_pooling(input_data, pool_h=2, pool_w=2, stride=1)
  180. expect_out = [[[[-1., 3., 9.],
  181. [1., 3., 4.],
  182. [1., -1., -2.]]]]
  183. assert (expect_out == out).all()
  184. assert np.float32 == out.dtype
  185. def test_np_convolve():
  186. """ test_np_convolve """
  187. out = np.convolve([1, 2, 3], [0, 1, 0.5]).astype(np.float32)
  188. assert ([0.0, 1.0, 2.5, 4.0, 1.5] == out).all()
  189. assert np.float32 == out.dtype
  190. def test_np_convolve_same():
  191. """ test_np_convolve_same """
  192. out = np.convolve([1, 2, 3], [0, 1, 0.5], 'same').astype(np.float32)
  193. assert ([1.0, 2.5, 4.0] == out).all()
  194. assert np.float32 == out.dtype
  195. def test_np_convolve_valid():
  196. """ test_np_convolve_valid """
  197. out = np.convolve([1, 2, 3], [0, 1, 0.5], 'valid').astype(np.float32)
  198. assert ([2.5] == out).all()
  199. assert np.float32 == out.dtype
  200. def test_relu():
  201. """ test_relu """
  202. x = np.array([-0.32208174, 0.33999891]).astype(np.float32)
  203. y = vm.relu(x)
  204. assert np.allclose([-0., 0.33999891], y)
  205. assert np.float32 == y.dtype
  206. y = vm.relu_grad(y)
  207. assert (y == [0., 1.]).all()
  208. assert np.float32 == y.dtype
  209. def test_softmax():
  210. """ test_softmax """
  211. logits = 2.84806275 * np.ones([1, 10]).astype(np.float32)
  212. y = vm.softmax(logits)
  213. assert np.allclose([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], y)
  214. assert np.float32 == y.dtype
  215. logits = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
  216. y = vm.softmax(logits, axis=1)
  217. labels = [[0.09003057, 0.24472847, 0.66524096], [0.09003057, 0.24472847, 0.66524096]]
  218. assert np.allclose(labels, y)
  219. assert np.float32 == y.dtype
  220. def test_softmax_cross_entropy_with_logit():
  221. """ test_softmax_cross_entropy_with_logit """
  222. logits = np.array([[1, 2, 3, 4, 2, 1, 0, 2, 1, 1], [1, 2, 4, 1, 0, 5, 0, 2, 1, 3]], dtype=np.float32)
  223. labels = np.array([[0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)
  224. loss, dx = vm.softmax_cross_entropy_with_logits(logits, labels)
  225. print("logits.shape: ", logits.shape)
  226. print("logits: ", logits)
  227. print("softmax: ", vm.softmax(logits))
  228. print("labels: ", labels)
  229. print("loss: ", loss)
  230. print("dx: ", dx)
  231. assert np.float32 == loss.dtype
  232. assert np.float32 == dx.dtype