
test_relu.py 1.9 kB

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P


class Net(nn.Cell):
    """Single-operator network that applies ReLU to its input."""
    def __init__(self):
        super(Net, self).__init__()
        self.relu = P.ReLU()

    def construct(self, x):
        return self.relu(x)


def get_output(x, enable_graph_kernel=False):
    # Set the flag explicitly in both cases so an earlier call cannot leave
    # graph kernel fusion enabled when the baseline output is computed.
    context.set_context(enable_graph_kernel=enable_graph_kernel)
    net = Net()
    output = net(x)
    return output


def test_relu(shape, dtype):
    # Compare the output with graph kernel fusion enabled against the
    # baseline output computed with it disabled.
    x = Tensor(np.random.normal(0, 10, shape).astype(dtype))
    expect = get_output(x, False)
    output = get_output(x, True)
    expect_np = expect.asnumpy().copy()
    output_np = output.asnumpy().copy()
    assert np.allclose(expect_np, output_np, 0.0001, 0.0001)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_gpu():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    test_relu((4, 3), np.int32)
    test_relu((12, 1), np.float16)


def test_relu_ascend():
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    test_relu((4, 3), np.int32)
    test_relu((12, 1), np.float16)
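
The platform entry points above call the test_relu helper once per shape/dtype pair, so both pairs run inside a single pytest case. A minimal sketch of an alternative, not part of the original file: the same pairs could be driven through pytest parametrization so each combination reports as its own case. The function name test_relu_gpu_parametrized is hypothetical, and the marker set and GPU target are simply carried over from test_relu_gpu for illustration.

@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("shape, dtype", [((4, 3), np.int32), ((12, 1), np.float16)])
def test_relu_gpu_parametrized(shape, dtype):
    # Hypothetical variant: one pytest case per (shape, dtype) combination.
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    test_relu(shape, dtype)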