
!11731 Add dynamic shape support to ReLU6 GPU

From: @TFbunny
Reviewed-by: 
Signed-off-by:
tags/v1.2.0-rc1
mindspore-ci-bot committed 4 years ago
commit 96cea98864
3 changed files with 42 additions and 6 deletions
  1. +1 -0  mindspore/core/abstract/primitive_infer_map.cc
  2. +4 -5  mindspore/ops/operations/nn_ops.py
  3. +37 -1  tests/st/ops/gpu/test_relu6_op.py

+1 -0  mindspore/core/abstract/primitive_infer_map.cc

@@ -141,6 +141,7 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
     {prim::kPrimBiasAdd, {InferImplBiasAdd, true}},
     {prim::kPrimBiasAddGrad, {InferImplBiasAddGrad, true}},
     {prim::kPrimRelu, {InferImplRelu, true}},
+    {prim::kPrimRelu6, {InferImplRelu, true}},
     {prim::kPrimZerosLike, {InferImplZerosLike, true}},
     {prim::kPrimBpropCut, {InferImplBpropCut, true}},
     {prim::kPrimLayerNorm, {InferImplLayerNorm, true}},
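Registering kPrimRelu6 against InferImplRelu works because ReLU6, like ReLU, is element-wise: output shape and dtype always mirror the input, including any dynamic dimensions, so ReLU's infer implementation can be reused as-is. A minimal numpy sketch of that invariant (relu6_ref is a hypothetical reference, not MindSpore code):

```python
import numpy as np

def relu6_ref(x):
    # ReLU6 clamps element-wise to [0, 6]; shape and dtype are preserved,
    # which is why ReLU's infer implementation can be reused for it.
    return np.minimum(np.maximum(x, 0), 6).astype(x.dtype)

x = np.random.randn(2, 3).astype(np.float32)
assert relu6_ref(x).shape == x.shape and relu6_ref(x).dtype == x.dtype
```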


+4 -5  mindspore/ops/operations/nn_ops.py

@@ -441,7 +441,7 @@ class SeLU(PrimitiveWithInfer):
         return x_dtype


-class ReLU6(PrimitiveWithInfer):
+class ReLU6(PrimitiveWithCheck):
     r"""
     Computes ReLU (Rectified Linear Unit) upper bounded by 6 of input tensors element-wise.

@@ -477,12 +477,11 @@ class ReLU6(PrimitiveWithInfer):
         """Initialize ReLU6"""
         self.init_prim_io_names(inputs=['x'], outputs=['output'])

-    def infer_shape(self, input_x):
-        return input_x
+    def check_shape(self, input_x):
+        pass

-    def infer_dtype(self, input_x):
+    def check_dtype(self, input_x):
         validator.check_tensor_dtype_valid('input_x', input_x, (mstype.float16, mstype.float32), self.name)
-        return input_x


 class ReLUV2(PrimitiveWithInfer):
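The switch from PrimitiveWithInfer to PrimitiveWithCheck is the heart of the change: check_shape only validates the input rather than pinning a static output shape, so shape inference falls through to the InferImplRelu registration above and unknown dimensions are tolerated. A hedged usage sketch (assumes a GPU build of MindSpore; float16/float32 are the only dtypes check_dtype accepts):

```python
import numpy as np
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu6 = P.ReLU6()
# relu6(x) = min(max(x, 0), 6), applied element-wise
x = Tensor(np.array([-3.0, 0.5, 7.0]).astype(np.float32))
print(relu6(x))  # expected: [0.  0.5 6. ]
```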


+37 -1  tests/st/ops/gpu/test_relu6_op.py

@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner


 class NetReLU6(nn.Cell):
@@ -31,6 +32,17 @@ class NetReLU6(nn.Cell):
         return self.relu6(x)


+class NetRelu6Dynamic(nn.Cell):
+    def __init__(self):
+        super(NetRelu6Dynamic, self).__init__()
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+        self.relu6 = P.ReLU6()
+
+    def construct(self, x):
+        x = self.test_dynamic(x)
+        return self.relu6(x)
+
+
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
@@ -51,3 +63,27 @@ def test_relu6():
     relu6 = NetReLU6()
     output = relu6(x)
     assert (output.asnumpy() == expect).all()
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_relu6_dynamic():
+
+    x1 = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]).astype(np.float32))
+    expect1 = np.array([[0, 4, 0,],
+                        [2, 0, 6,]]).astype(np.float32)
+    x2 = Tensor(np.array([[[[-1, 1, 10],
+                            [5.9, 6.1, 6],
+                            [10, 1, -1]]]]).astype(np.float32))
+    expect2 = np.array([[[[0, 1, 6,],
+                          [5.9, 6, 6,],
+                          [6, 1, 0.]]]]).astype(np.float32)
+
+
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    relu6 = NetRelu6Dynamic()
+    output1 = relu6(x1)
+    assert (output1.asnumpy() == expect1).all()
+    output2 = relu6(x2)
+    assert (output2.asnumpy() == expect2).all()
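Note that the two inputs deliberately differ in rank (2-D vs 4-D) and are fed through a single NetRelu6Dynamic instance, which is what exercises the dynamic-shape path that inner.GpuConvertToDynamicShape forces. To run just the new case on a GPU machine, pytest can be invoked programmatically; the exact invocation below is an assumption about the local test setup:

```python
import pytest

# Run only the new dynamic-shape case (requires a GPU build of MindSpore).
pytest.main(["-sv", "tests/st/ops/gpu/test_relu6_op.py::test_relu6_dynamic"])
```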
