
test_relu_v2.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
import mindspore.ops.operations._grad_ops as G
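

# ReluGrad(dy, y) takes the upstream gradient dy and the forward output y
# and passes dy through wherever y is positive (dx = dy * (y > 0)).
# ReluNet wires the forward op and its grad kernel into one graph so both
# can be checked from a single launch.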
class ReluNet(nn.Cell):
    def __init__(self):
        super(ReluNet, self).__init__()
        self.relu = P.ReLU()
        self.relu_grad = G.ReluGrad()

    def construct(self, x, dy):
        y = self.relu(x)
        dx = self.relu_grad(dy, y)
        return y, dx
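

# Checks the plain ReLU forward/backward pair against hand-computed values;
# save_graphs=True dumps the compiled graph, presumably so the ReluV2
# conversion pass can be inspected on GPU.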
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ReluV2():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.float32))
    dy = Tensor(np.array([[[[1, 0, 3],
                            [0, 1, 0],
                            [2, 1, 1]]]]).astype(np.float32))
    expect_y = np.array([[[[0, 1, 10],
                           [1, 0, 1],
                           [10, 1, 0]]]]).astype(np.float32)
    expect_dx = np.array([[[[0, 0, 3],
                            [0, 0, 0],
                            [2, 1, 0]]]]).astype(np.float32)

    net = ReluNet()
    y, dx = net(x, dy)
    assert np.allclose(y.asnumpy(), expect_y)
    assert np.allclose(dx.asnumpy(), expect_dx)
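

# Same check with a TensorAdd feeding the ReLU, the Add+ReLU pattern that
# the GPU backend may fuse in graph mode.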
class AddReluNet(nn.Cell):
    def __init__(self):
        super(AddReluNet, self).__init__()
        self.add = P.TensorAdd()
        self.relu = P.ReLU()
        self.relu_grad = G.ReluGrad()

    def construct(self, x1, x2, dy):
        y = self.add(x1, x2)
        y = self.relu(y)
        dx = self.relu_grad(dy, y)
        return y, dx
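

# With x1 == x2, expect_y is simply 2 * relu(x1), and the positivity mask
# is unchanged, so expect_dx matches the single-input ReLU case above.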
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_AddRelu():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
    x1 = Tensor(np.array([[[[-1, 1, 10],
                            [1, -1, 1],
                            [10, 1, -1]]]]).astype(np.float32))
    x2 = Tensor(np.array([[[[-1, 1, 10],
                            [1, -1, 1],
                            [10, 1, -1]]]]).astype(np.float32))
    dy = Tensor(np.array([[[[1, 0, 3],
                            [0, 1, 0],
                            [2, 1, 1]]]]).astype(np.float32))
    expect_y = np.array([[[[0, 2, 20],
                           [2, 0, 2],
                           [20, 2, 0]]]]).astype(np.float32)
    expect_dx = np.array([[[[0, 0, 3],
                            [0, 0, 0],
                            [2, 1, 0]]]]).astype(np.float32)

    net = AddReluNet()
    y, dx = net(x1, x2, dy)
    assert np.allclose(y.asnumpy(), expect_y)
    assert np.allclose(dx.asnumpy(), expect_dx)
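

# Here the add sits on the gradient path instead: dy1 + dy2 feeds ReluGrad,
# covering the Add+ReluGrad pattern.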
class AddReluGradNet(nn.Cell):
    def __init__(self):
        super(AddReluGradNet, self).__init__()
        self.add = P.TensorAdd()
        self.relu = P.ReLU()
        self.relu_grad = G.ReluGrad()

    def construct(self, x, dy1, dy2):
        y = self.relu(x)
        dy = self.add(dy1, dy2)
        dx = self.relu_grad(dy, y)
        return y, dx
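

# dy1 == dy2, so the expected dx is double the single-dy case wherever
# the input is positive.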
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_AddReluGrad():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=True)
    x = Tensor(np.array([[[[-1, 1, 10],
                           [1, -1, 1],
                           [10, 1, -1]]]]).astype(np.float32))
    dy1 = Tensor(np.array([[[[1, 0, 3],
                             [0, 1, 0],
                             [2, 1, 1]]]]).astype(np.float32))
    dy2 = Tensor(np.array([[[[1, 0, 3],
                             [0, 1, 0],
                             [2, 1, 1]]]]).astype(np.float32))
    expect_y = np.array([[[[0, 1, 10],
                           [1, 0, 1],
                           [10, 1, 0]]]]).astype(np.float32)
    expect_dx = np.array([[[[0, 0, 6],
                            [0, 0, 0],
                            [4, 2, 0]]]]).astype(np.float32)

    net = AddReluGradNet()
    y, dx = net(x, dy1, dy2)
    assert np.allclose(y.asnumpy(), expect_y)
    assert np.allclose(dx.asnumpy(), expect_dx)
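

# A minimal numpy sketch of the semantics these tests assert; np_relu and
# np_relu_grad are illustrative helpers, not part of the original suite:
def np_relu(x):
    # Forward: zero out negative entries.
    return np.maximum(x, 0)


def np_relu_grad(dy, y):
    # Backward: pass the upstream gradient where the forward output is positive.
    return dy * (y > 0)


# For example, the expectations in test_AddReluGrad reduce to
# np_relu_grad(dy1 + dy2, np_relu(x)) on the raw numpy arrays.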