
!11213 Add dynamic shape support to GPU ReduceMean

From: @TFbunny
Reviewed-by: @tom__chen, @robingrosman
Signed-off-by: @robingrosman
tags/v1.2.0-rc1
Committed by mindspore-ci-bot, 4 years ago
commit 6d0c0157aa
4 changed files with 55 additions and 5 deletions

  1. mindspore/core/abstract/infer_functions.h (+2, -2)
  2. mindspore/core/abstract/prim_maths.cc (+3, -2)
  3. mindspore/core/abstract/primitive_infer_map.cc (+2, -1)
  4. tests/st/ops/gpu/test_reduce_mean_op.py (+48, -0)

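For orientation before the per-file diffs: after this change, ReduceMean on GPU can consume a tensor whose shape is only resolved at runtime. Below is a minimal usage sketch (not part of the diff) that mirrors the new test cases further down; the class name DynReduceMean, the GRAPH_MODE/GPU context setup, and the concrete input shape are illustrative, while GpuConvertToDynamicShape is the inner test-only op the tests use to make the input shape dynamic.

import numpy as np
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")

class DynReduceMean(nn.Cell):
    def __init__(self):
        super(DynReduceMean, self).__init__()
        # Test-only inner op that erases the static shape, so ReduceMean
        # has to handle a dynamically shaped input.
        self.to_dynamic = inner.GpuConvertToDynamicShape()
        self.reduce_mean = P.ReduceMean(keep_dims=True)

    def construct(self, x, axis):
        x = self.to_dynamic(x)
        return self.reduce_mean(x, axis)

net = DynReduceMean()
out = net(Tensor(np.random.rand(2, 3, 4, 4).astype(np.float32)), 3)
print(out.shape)  # (2, 3, 4, 1) with keep_dims=True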
mindspore/core/abstract/infer_functions.h (+2, -2)

@@ -265,8 +265,8 @@ AbstractBasePtr InferImplSub(const AnalysisEnginePtr &, const PrimitivePtr &prim
                              const AbstractBasePtrList &args_spec_list);
 AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                const AbstractBasePtrList &args_spec_list);
-AbstractBasePtr InferImplReduceSum(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                   const AbstractBasePtrList &args_spec_list);
+AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                    const AbstractBasePtrList &args_spec_list);
 AbstractBasePtr InferImplCast(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                               const AbstractBasePtrList &args_spec_list);
 AbstractBasePtr InferImplMinimum(const AnalysisEnginePtr &, const PrimitivePtr &primitive,


mindspore/core/abstract/prim_maths.cc (+3, -2)

@@ -121,8 +121,9 @@ AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &pr
   return ret;
 }
 
-AbstractBasePtr InferImplReduceSum(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
-                                   const AbstractBasePtrList &args_spec_list) {
+// To reduce code repeat, use InferImplReduceFunc. Currently registered with ReduceMean, ReduceSum.
+AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                                    const AbstractBasePtrList &args_spec_list) {
   const std::string op_name = primitive->name();
   CheckArgsSize(op_name, args_spec_list, 1);
   auto input_x = CheckArg<AbstractTensor>(op_name, args_spec_list, 0);


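For reference, the output-shape behavior that the shared infer function has to reproduce for ReduceMean/ReduceSum follows the usual reduction semantics. A NumPy-only sketch (not part of the diff; the shape is chosen to match x0 in the test file):

import numpy as np

x = np.random.rand(2, 3, 4, 4).astype(np.float32)
# keep_dims=False drops the reduced axis: (2, 3, 4, 4) -> (2, 3, 4)
assert np.mean(x, axis=3, keepdims=False).shape == (2, 3, 4)
# keep_dims=True keeps the reduced axis with size 1: (2, 3, 4, 4) -> (2, 3, 4, 1)
assert np.mean(x, axis=3, keepdims=True).shape == (2, 3, 4, 1)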
mindspore/core/abstract/primitive_infer_map.cc (+2, -1)

@@ -44,7 +44,8 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
     {prim::kPrimSqrtGrad, {InferImplSqrtGrad, true}},
     {prim::kPrimSub, {InferImplSub, true}},
     {prim::kPrimEqual, {InferImplEqual, true}},
-    {prim::kPrimReduceSum, {InferImplReduceSum, true}},
+    {prim::kPrimReduceSum, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceMean, {InferImplReduceFunc, true}},
     {prim::kPrimMinimum, {InferImplMinimum, true}},
     {prim::kPrimDivNoNan, {InferImplDivNoNan, true}},
     {prim::kPrimLinSpace, {InferImplLinSpace, true}},


tests/st/ops/gpu/test_reduce_mean_op.py (+48, -0)

@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
 
 x0 = np.random.rand(2, 3, 4, 4).astype(np.float32)
 axis0 = 3
@@ -265,3 +266,50 @@ def test_ReduceMean():
     error14 = np.ones(shape=expect14.shape) * 1.0e-5
     assert np.all(diff14 < error14)
     assert output[14].shape == expect14.shape
+
+class ReduceMean_Dynamic(nn.Cell):
+    def __init__(self, keepdims=False):
+        super(ReduceMean_Dynamic, self).__init__()
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+        self.reducemean = P.ReduceMean(keep_dims=keepdims)
+
+    def construct(self, input_x, axis):
+        input_x = self.test_dynamic(input_x)
+        output = self.reducemean(input_x, axis)
+        return output
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_reducemean_keepdims_true():
+    net = ReduceMean_Dynamic(keepdims=True)
+    x_tensor_1 = Tensor(x14)
+    output_1 = net(x_tensor_1, axis14)
+    x_tensor_2 = Tensor(x0)
+    output_2 = net(x_tensor_2, axis0)
+
+    expect_1 = np.mean(x14, axis=np_axis14, keepdims=True)
+    diff_1 = abs(output_1.asnumpy() - expect_1)
+    error_1 = np.ones(shape=expect_1.shape) * 1.0e-5
+    assert np.all(diff_1 < error_1)
+    assert output_1.shape == expect_1.shape
+
+    expect_2 = np.mean(x0, axis=axis0, keepdims=True)
+    diff_2 = abs(output_2.asnumpy() - expect_2)
+    error_2 = np.ones(shape=expect_2.shape) * 1.0e-5
+    assert np.all(diff_2 < error_2)
+    assert output_2.shape == expect_2.shape
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_dynamic_reducemean_keepdims_false():
+    net = ReduceMean_Dynamic(keepdims=False)
+    x_tensor = Tensor(x12)
+    output = net(x_tensor, axis12)
+
+    expect = np.mean(x12, axis=axis12, keepdims=False)
+    diff = abs(output.asnumpy() - expect)
+    error = np.ones(shape=expect.shape) * 1.0e-5
+    assert np.all(diff < error)
+    assert output.shape == expect.shape

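Assuming the repository's usual system-test setup on a GPU machine, the new cases can presumably be selected by name with pytest, e.g.:

pytest -sv tests/st/ops/gpu/test_reduce_mean_op.py -k dynamic_reducemean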