You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

test_atan_grad_op.py 1.8 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
import mindspore.ops.operations._grad_ops as P
# Run eagerly on GPU; these tests exercise the GPU AtanGrad kernel.
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
# Fix the NumPy seed so the randomly generated test inputs are reproducible.
np.random.seed(1)
  22. @pytest.mark.level0
  23. @pytest.mark.platform_x86_gpu_training
  24. @pytest.mark.env_onecard
  25. def test_atangrad_fp32():
  26. x_np = np.random.rand(4, 2).astype(np.float32) * 10
  27. dout_np = np.random.rand(4, 2).astype(np.float32) * 10
  28. output_ms = P.AtanGrad()(Tensor(x_np), Tensor(dout_np))
  29. output_np = dout_np / (1 + np.square(x_np))
  30. assert np.allclose(output_ms.asnumpy(), output_np, 1e-4, 1e-4)
  31. @pytest.mark.level0
  32. @pytest.mark.platform_x86_gpu_training
  33. @pytest.mark.env_onecard
  34. def test_atangrad_fp16():
  35. x_np = np.random.rand(4, 2).astype(np.float16) * 10
  36. dout_np = np.random.rand(4, 2).astype(np.float16) * 10
  37. output_ms = P.AtanGrad()(Tensor(x_np), Tensor(dout_np))
  38. output_np = dout_np.astype(np.float32) / (1 + np.square(x_np.astype(np.float32)))
  39. assert np.allclose(output_ms.asnumpy(), output_np.astype(np.float16), 1e-3, 1e-3)