You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_batchnorm_op.py 5.8 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132
  1. # Copyright 2019 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import pytest
  16. import numpy as np
  17. from mindspore.nn import Cell
  18. from mindspore.nn import BatchNorm2d
  19. from mindspore.common.tensor import Tensor
  20. from mindspore.ops import composite as C
  21. import mindspore.context as context
  22. class Batchnorm_Net(Cell):
  23. def __init__(self, c, weight, bias, moving_mean, moving_var_init):
  24. super(Batchnorm_Net, self).__init__()
  25. self.bn = BatchNorm2d(c, eps=0.00001, momentum=0.1, beta_init=bias, gamma_init=weight,
  26. moving_mean_init=moving_mean, moving_var_init=moving_var_init)
  27. def construct(self, input_data):
  28. x = self.bn(input_data)
  29. return x
  30. class Grad(Cell):
  31. def __init__(self, network):
  32. super(Grad, self).__init__()
  33. self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True)
  34. self.network = network
  35. def construct(self, input_data, sens):
  36. gout = self.grad(self.network)(input_data, sens)
  37. return gout
  38. @pytest.mark.level0
  39. @pytest.mark.platform_x86_gpu_training
  40. @pytest.mark.env_onecard
  41. def test_train_forward():
  42. x = np.array([[
  43. [[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],
  44. [[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)
  45. expect_output = np.array([[[[-0.6059, 0.3118, 0.3118, 1.2294],
  46. [-0.1471, 0.7706, 1.6882, 2.6059],
  47. [0.3118, 1.6882, 2.1471, 2.1471],
  48. [0.7706, 0.3118, 2.6059, -0.1471]],
  49. [[0.9119, 1.8518, 1.3819, -0.0281],
  50. [-0.0281, 0.9119, 1.3819, 1.8518],
  51. [2.7918, 0.4419, -0.4981, 0.9119],
  52. [1.8518, 0.9119, 2.3218, -0.9680]]]]).astype(np.float32)
  53. grad = np.array([[
  54. [[1, 2, 7, 1], [4, 2, 1, 3], [1, 6, 5, 2], [2, 4, 3, 2]],
  55. [[9, 4, 3, 5], [1, 3, 7, 6], [5, 7, 9, 9], [1, 4, 6, 8]]]]).astype(np.float32)
  56. weight = np.ones(2).astype(np.float32)
  57. bias = np.ones(2).astype(np.float32)
  58. moving_mean = np.ones(2).astype(np.float32)
  59. moving_var_init = np.ones(2).astype(np.float32)
  60. error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-4
  61. context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
  62. bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias), Tensor(moving_mean), Tensor(moving_var_init))
  63. bn_net.set_train()
  64. output = bn_net(Tensor(x))
  65. diff = output.asnumpy() - expect_output
  66. assert np.all(diff < error)
  67. assert np.all(-diff < error)
  68. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  69. bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias), Tensor(moving_mean), Tensor(moving_var_init))
  70. bn_net.set_train()
  71. output = bn_net(Tensor(x))
  72. diff = output.asnumpy() - expect_output
  73. assert np.all(diff < error)
  74. assert np.all(-diff < error)
  75. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  76. bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias), Tensor(moving_mean), Tensor(moving_var_init))
  77. bn_net.set_train(False)
  78. output = bn_net(Tensor(x))
  79. context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
  80. bn_net = Batchnorm_Net(2, Tensor(weight), Tensor(bias), Tensor(moving_mean), Tensor(moving_var_init))
  81. bn_net.set_train(False)
  82. output = bn_net(Tensor(x))
  83. @pytest.mark.level0
  84. @pytest.mark.platform_x86_gpu_training
  85. @pytest.mark.env_onecard
  86. def test_train_backward():
  87. x = np.array([[
  88. [[1, 3, 3, 5], [2, 4, 6, 8], [3, 6, 7, 7], [4, 3, 8, 2]],
  89. [[5, 7, 6, 3], [3, 5, 6, 7], [9, 4, 2, 5], [7, 5, 8, 1]]]]).astype(np.float32)
  90. grad = np.array([[
  91. [[1, 2, 7, 1], [4, 2, 1, 3], [1, 6, 5, 2], [2, 4, 3, 2]],
  92. [[9, 4, 3, 5], [1, 3, 7, 6], [5, 7, 9, 9], [1, 4, 6, 8]]]]).astype(np.float32)
  93. expect_output = np.array([[[[-0.69126546, -0.32903028, 1.9651246, -0.88445705],
  94. [0.6369296, -0.37732816, -0.93275493, -0.11168876],
  95. [-0.7878612, 1.3614, 0.8542711, -0.52222186],
  96. [-0.37732816, 0.5886317, -0.11168876, -0.28073236]],
  97. [[1.6447213, -0.38968924, -1.0174079, -0.55067265],
  98. [-2.4305856, -1.1751484, 0.86250514, 0.5502673],
  99. [0.39576983, 0.5470243, 1.1715001, 1.6447213],
  100. [-1.7996241, -0.7051701, 0.7080077, 0.5437813]]]]).astype(np.float32)
  101. weight = Tensor(np.ones(2).astype(np.float32))
  102. bias = Tensor(np.ones(2).astype(np.float32))
  103. moving_mean = Tensor(np.ones(2).astype(np.float32))
  104. moving_var_init = Tensor(np.ones(2).astype(np.float32))
  105. error = np.ones(shape=[1, 2, 4, 4]) * 1.0e-6
  106. context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
  107. bn_net = Batchnorm_Net(2, weight, bias, moving_mean, moving_var_init)
  108. bn_net.set_train()
  109. bn_grad = Grad(bn_net)
  110. output = bn_grad(Tensor(x), Tensor(grad))
  111. diff = output[0].asnumpy() - expect_output
  112. assert np.all(diff < error)
  113. assert np.all(-diff < error)