
!5895 gpu maximum minimum absgrad kernel fix

Merge pull request !5895 from chenweifeng/maximum-fix
Tag: v1.0.0
Committed by: mindspore-ci-bot
Commit: 6fdb43d22d
2 changed files with 44 additions and 1 deletion:
  1. mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/broadcast_impl.cu (+2, -1)
  2. tests/st/ops/gpu/test_broadcast_op.py (+42, -0)

mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/broadcast_impl.cu (+2, -1)

@@ -202,7 +202,8 @@ void ElewiseArith(const int &nums, enum BroadcastOpType op, const T *x0, const T
 template <>
 void ElewiseArith(const int &nums, enum BroadcastOpType op, const half *x0, const half *x1, half *y,
                   cudaStream_t stream) {
-  if (nums % 2 == 0) {
+  // half2 `>` returns true iff both half results are true; fall back to half for these ops.
+  if (nums % 2 == 0 && op != BROADCAST_TYPE_MINIMUM && op != BROADCAST_TYPE_MAXIMUM && op != BROADCAST_TYPE_ABSGRAD) {
     ElewiseArithKernel<half2>(nums / 2, op, reinterpret_cast<const half2 *>(x0), reinterpret_cast<const half2 *>(x1),
                               reinterpret_cast<half2 *>(y), stream);
   } else {
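The guard above works because CUDA's vectorized half2 comparison operators reduce both lanes to a single bool: `a > b` on `__half2` lowers to `__hbgt2()`, which is true only when both halves compare greater, so a select like `a > b ? a : b` picks the same operand for both lanes and goes wrong whenever the lanes disagree. Below is a minimal standalone sketch of that failure mode, not MindSpore code; the file and kernel names are invented, and it assumes a CUDA toolkit with cuda_fp16.h and an sm_53+ device.

// half2_cmp_demo.cu -- sketch: why a half2 comparison select is wrong for
// Maximum when the two fp16 lanes disagree. Build: nvcc -arch=sm_53 half2_cmp_demo.cu
#include <cstdio>
#include <cuda_fp16.h>

__global__ void MaxDemo(float *out) {
  // Lane 0: a > b; lane 1: a < b -- the lanes disagree on purpose.
  __half2 a = __halves2half2(__float2half(2.0f), __float2half(0.0f));
  __half2 b = __halves2half2(__float2half(1.0f), __float2half(1.0f));

  // Vectorized path: `a > b` is __hbgt2(), true only if BOTH lanes are
  // greater, so one bool selects the same operand for both lanes.
  __half2 wrong = (a > b) ? a : b;

  // Scalar fallback (what the fix enforces): compare each lane on its own.
  __half lo = __hgt(__low2half(a), __low2half(b)) ? __low2half(a) : __low2half(b);
  __half hi = __hgt(__high2half(a), __high2half(b)) ? __high2half(a) : __high2half(b);

  out[0] = __half2float(__low2half(wrong));   // 1.0 -- wrong, expected 2.0
  out[1] = __half2float(__high2half(wrong));  // 1.0 -- correct only by luck
  out[2] = __half2float(lo);                  // 2.0 -- correct
  out[3] = __half2float(hi);                  // 1.0 -- correct
}

int main() {
  float *out;
  cudaMallocManaged(&out, 4 * sizeof(float));
  MaxDemo<<<1, 1>>>(out);
  cudaDeviceSynchronize();
  printf("half2 select: (%g, %g)  per-lane: (%g, %g)\n", out[0], out[1], out[2], out[3]);
  // Prints: half2 select: (1, 1)  per-lane: (2, 1)
  return 0;
}

Rather than adding per-op lane handling to the half2 kernel, the fix simply routes Minimum, Maximum, and AbsGrad through the plain half kernel, where each element is compared individually.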


tests/st/ops/gpu/test_broadcast_op.py (+42, -0)

@@ -68,6 +68,48 @@ def test_nobroadcast():
     assert np.allclose(output_ms.asnumpy(), output_np)
 
 
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_nobroadcast_fp16():
+    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
+
+    x1_np = np.random.rand(10, 20).astype(np.float16)
+    x2_np = np.random.rand(10, 20).astype(np.float16)
+
+    output_ms = P.Minimum()(Tensor(x1_np), Tensor(x2_np))
+    output_np = np.minimum(x1_np, x2_np)
+    assert np.allclose(output_ms.asnumpy(), output_np)
+
+    output_ms = P.Maximum()(Tensor(x1_np), Tensor(x2_np))
+    output_np = np.maximum(x1_np, x2_np)
+    assert np.allclose(output_ms.asnumpy(), output_np)
+
+    output_ms = P.Greater()(Tensor(x1_np), Tensor(x2_np))
+    output_np = x1_np > x2_np
+    assert np.allclose(output_ms.asnumpy(), output_np)
+
+    output_ms = P.Less()(Tensor(x1_np), Tensor(x2_np))
+    output_np = x1_np < x2_np
+    assert np.allclose(output_ms.asnumpy(), output_np)
+
+    output_ms = P.Pow()(Tensor(x1_np), Tensor(x2_np))
+    output_np = np.power(x1_np, x2_np)
+    assert np.allclose(output_ms.asnumpy(), output_np)
+
+    output_ms = P.RealDiv()(Tensor(x1_np), Tensor(x2_np))
+    output_np = x1_np / x2_np
+    assert np.allclose(output_ms.asnumpy(), output_np)
+
+    output_ms = P.Mul()(Tensor(x1_np), Tensor(x2_np))
+    output_np = x1_np * x2_np
+    assert np.allclose(output_ms.asnumpy(), output_np)
+
+    output_ms = P.Sub()(Tensor(x1_np), Tensor(x2_np))
+    output_np = x1_np - x2_np
+    assert np.allclose(output_ms.asnumpy(), output_np)
+
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
