diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h
index 5971ecc91e..0689471f3f 100644
--- a/mindspore/ccsrc/utils/utils.h
+++ b/mindspore/ccsrc/utils/utils.h
@@ -497,8 +497,8 @@ const std::set<std::string> kComputeDepend = {kUniqueOpName, kComputeAccidentalH
 const std::set<std::string> k3DFormatSet = {kOpFormat_NCDHW, kOpFormat_NDC1HWC0, kOpFormat_FRACTAL_Z_3D};

 const std::set<std::string> DynamicShapeConstInputToAttr = {
-  kCastOpName, kExpandDimsOpName, kReshapeOpName, kEmbeddingLookupOpName,
-  kTransposeOpName, kReduceSumOpName, kConcatOpName};
+  kCastOpName, kExpandDimsOpName, kReshapeOpName, kEmbeddingLookupOpName, kTransposeOpName, kReduceSumOpName,
+  kReduceMinOpName, kReduceMeanOpName, kReduceMaxOpName, kReduceAllOpName, kReduceAnyOpName, kConcatOpName};

 static inline void ChangeFileMode(const std::string &file_name, mode_t mode) {
   try {
diff --git a/mindspore/core/abstract/prim_maths.cc b/mindspore/core/abstract/prim_maths.cc
index 32ba5c9c32..d1cd002cfb 100644
--- a/mindspore/core/abstract/prim_maths.cc
+++ b/mindspore/core/abstract/prim_maths.cc
@@ -121,7 +121,8 @@ AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &pr
   return ret;
 }

-// To reduce code repeat, use InferImplReduceFunc. Currently registered with ReduceMean, ReduceSum.
+// To reduce code repeat, use InferImplReduceFunc. Currently registered with ReduceMean, ReduceSum,
+// ReduceAll, ReduceAny, ReduceMax, ReduceMin.
 AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                     const AbstractBasePtrList &args_spec_list) {
   const std::string op_name = primitive->name();
diff --git a/mindspore/core/abstract/primitive_infer_map.cc b/mindspore/core/abstract/primitive_infer_map.cc
index 88b59947cf..9fce8d327a 100644
--- a/mindspore/core/abstract/primitive_infer_map.cc
+++ b/mindspore/core/abstract/primitive_infer_map.cc
@@ -46,6 +46,10 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
     {prim::kPrimEqual, {InferImplEqual, true}},
     {prim::kPrimReduceSum, {InferImplReduceFunc, true}},
     {prim::kPrimReduceMean, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceAll, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceAny, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceMax, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceMin, {InferImplReduceFunc, true}},
     {prim::kPrimMinimum, {InferImplMinimum, true}},
     {prim::kPrimDivNoNan, {InferImplDivNoNan, true}},
     {prim::kPrimLinSpace, {InferImplLinSpace, true}},
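Illustrative sketch (not part of the patch): the three C++ changes above are what let these reduce ops execute when the input shape is only known at run time — each op's const axis input is converted to an attribute under dynamic shape, and its frontend shape inference is routed through the shared InferImplReduceFunc. The snippet below mirrors the GPU test pattern added later in this diff; inner.GpuConvertToDynamicShape is an internal, test-only op that erases the static shape, and the class name DynReduceMax is made up for illustration.

import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner


class DynReduceMax(nn.Cell):
    """Hypothetical cell: reduce a tensor whose static shape is erased."""
    def __init__(self):
        super(DynReduceMax, self).__init__()
        self.reduce_max = P.ReduceMax(False)
        self.to_dynamic = inner.GpuConvertToDynamicShape()

    def construct(self, x, axis):
        # GpuConvertToDynamicShape drops the compile-time shape, so ReduceMax
        # must re-infer its output shape through the newly registered path.
        return self.reduce_max(self.to_dynamic(x), axis)


context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
out = DynReduceMax()(Tensor(np.random.rand(2, 3).astype(np.float32)), 0)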
diff --git a/tests/st/ops/gpu/test_reduce_all_op.py b/tests/st/ops/gpu/test_reduce_all_op.py
index 2b87063bb9..bfc36c018f 100644
--- a/tests/st/ops/gpu/test_reduce_all_op.py
+++ b/tests/st/ops/gpu/test_reduce_all_op.py
@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner

 x0 = np.array([[True, True], [True, False], [False, False]])
 axis0 = 0
@@ -78,17 +79,50 @@ def test_ReduceAll():
     output = reduce_all()

     expect0 = np.all(x0, axis=axis0, keepdims=keep_dims0)
-    np.allclose(output[0].asnumpy(), expect0)
+    assert np.allclose(output[0].asnumpy(), expect0)
     assert output[0].shape == expect0.shape

     expect1 = np.all(x1, axis=axis1, keepdims=keep_dims1)
-    np.allclose(output[1].asnumpy(), expect1)
+    assert np.allclose(output[1].asnumpy(), expect1)
     assert output[1].shape == expect1.shape

     expect2 = np.all(x2, axis=axis2, keepdims=keep_dims2)
-    np.allclose(output[2].asnumpy(), expect2)
+    assert np.allclose(output[2].asnumpy(), expect2)
     assert output[2].shape == expect2.shape

     expect3 = np.all(x3, axis=axis3, keepdims=keep_dims3)
-    np.allclose(output[3].asnumpy(), expect3)
+    assert np.allclose(output[3].asnumpy(), expect3)
     assert output[3].shape == expect3.shape
+
+
+class ReduceAllDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceAllDynamic, self).__init__()
+        self.reduceall = P.ReduceAll(False)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reduceall(x, axis)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_all_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceAllDynamic()
+
+    x_1 = np.array([[True, True], [True, False], [False, False]])
+    axis_1 = 0
+    expect_1 = np.all(x_1, axis=axis_1, keepdims=False)
+
+    x_2 = np.array([[True, True], [True, True], [True, False], [False, False]])
+    axis_2 = 0
+    expect_2 = np.all(x_2, axis=axis_2, keepdims=False)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)
diff --git a/tests/st/ops/gpu/test_reduce_any_op.py b/tests/st/ops/gpu/test_reduce_any_op.py
index c2e579bc40..51874a699a 100644
--- a/tests/st/ops/gpu/test_reduce_any_op.py
+++ b/tests/st/ops/gpu/test_reduce_any_op.py
@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner

 x0 = np.array([[True, True], [True, False], [False, False]])
 axis0 = 0
@@ -77,18 +78,51 @@ def test_ReduceAny():
     reduce_any = ReduceAny()
     output = reduce_any()

-    expect0 = np.all(x0, axis=axis0, keepdims=keep_dims0)
-    np.allclose(output[0].asnumpy(), expect0)
+    expect0 = np.any(x0, axis=axis0, keepdims=keep_dims0)
+    assert np.allclose(output[0].asnumpy(), expect0)
     assert output[0].shape == expect0.shape

-    expect1 = np.all(x1, axis=axis1, keepdims=keep_dims1)
-    np.allclose(output[1].asnumpy(), expect1)
+    expect1 = np.any(x1, axis=axis1, keepdims=keep_dims1)
+    assert np.allclose(output[1].asnumpy(), expect1)
     assert output[1].shape == expect1.shape

-    expect2 = np.all(x2, axis=axis2, keepdims=keep_dims2)
-    np.allclose(output[2].asnumpy(), expect2)
+    expect2 = np.any(x2, axis=axis2, keepdims=keep_dims2)
+    assert np.allclose(output[2].asnumpy(), expect2)
     assert output[2].shape == expect2.shape

-    expect3 = np.all(x3, axis=axis3, keepdims=keep_dims3)
-    np.allclose(output[3].asnumpy(), expect3)
+    expect3 = np.any(x3, axis=axis3, keepdims=keep_dims3)
+    assert np.allclose(output[3].asnumpy(), expect3)
     assert output[3].shape == expect3.shape
+
+
+class ReduceAnyDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceAnyDynamic, self).__init__()
+        self.reduceany = P.ReduceAny(False)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reduceany(x, axis)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_any_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceAnyDynamic()
+
+    x_1 = np.array([[True, True], [True, False], [False, False]])
+    axis_1 = 0
+    expect_1 = np.any(x_1, axis=axis_1, keepdims=False)
+
+    x_2 = np.array([[True, True], [True, True], [True, False], [False, False]])
+    axis_2 = 0
+    expect_2 = np.any(x_2, axis=axis_2, keepdims=False)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)
diff --git a/tests/st/ops/gpu/test_reduce_max_op.py b/tests/st/ops/gpu/test_reduce_max_op.py
index afe558452d..968a0353e9 100644
--- a/tests/st/ops/gpu/test_reduce_max_op.py
+++ b/tests/st/ops/gpu/test_reduce_max_op.py
@@ -21,6 +21,8 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
+

 x0 = np.random.rand(2, 3, 4, 4).astype(np.float32)
 axis0 = 3
@@ -175,3 +177,35 @@ def test_ReduceMax():
     diff8 = abs(output[8].asnumpy() - expect8)
     error8 = np.ones(shape=expect8.shape) * 1.0e-5
     assert np.all(diff8 < error8)
+
+
+class ReduceMaxDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceMaxDynamic, self).__init__()
+        self.reducemax = P.ReduceMax(False)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reducemax(x, axis)
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_max_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceMaxDynamic()
+
+    x_1 = x8
+    axis_1 = 0
+    expect_1 = np.max(x_1, axis=0, keepdims=False)
+
+    x_2 = x1
+    axis_2 = 0
+    expect_2 = np.max(x_2, axis=0, keepdims=False)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)
diff --git a/tests/st/ops/gpu/test_reduce_mean_op.py b/tests/st/ops/gpu/test_reduce_mean_op.py
index 850148a950..5b4c3396a8 100644
--- a/tests/st/ops/gpu/test_reduce_mean_op.py
+++ b/tests/st/ops/gpu/test_reduce_mean_op.py
@@ -267,9 +267,9 @@ def test_ReduceMean():
     assert np.all(diff14 < error14)
     assert output[14].shape == expect14.shape

-class ReduceMean_Dynamic(nn.Cell):
+class ReduceMeanDynamic(nn.Cell):
     def __init__(self, keepdims=False):
-        super(ReduceMean_Dynamic, self).__init__()
+        super(ReduceMeanDynamic, self).__init__()
         self.test_dynamic = inner.GpuConvertToDynamicShape()
         self.reducemean = P.ReduceMean(keep_dims=keepdims)

@@ -281,8 +281,9 @@ class ReduceMean_Dynamic(nn.Cell):
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
-def test_dynamic_reducemean_keepdims_true():
-    net = ReduceMean_Dynamic(keepdims=True)
+def test_dynamic_reduce_mean_keepdims_true():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceMeanDynamic(keepdims=True)
     x_tensor_1 = Tensor(x14)
     output_1 = net(x_tensor_1, axis14)
     x_tensor_2 = Tensor(x0)
@@ -303,8 +304,9 @@
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
-def test_dynamic_reducemean_keepdims_false():
-    net = ReduceMean_Dynamic(keepdims=False)
+def test_dynamic_reduce_mean_keepdims_false():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceMeanDynamic(keepdims=False)
     x_tensor = Tensor(x12)
     output = net(x_tensor, axis12)
diff --git a/tests/st/ops/gpu/test_reduce_min_op.py b/tests/st/ops/gpu/test_reduce_min_op.py
index c502c549ae..5008e6116c 100644
--- a/tests/st/ops/gpu/test_reduce_min_op.py
+++ b/tests/st/ops/gpu/test_reduce_min_op.py
@@ -21,6 +21,8 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
+

 x0 = np.random.rand(2, 3, 4, 4).astype(np.float32)
 axis0 = 3
@@ -175,3 +177,35 @@ def test_ReduceMin():
     diff8 = abs(output[8].asnumpy() - expect8)
     error8 = np.ones(shape=expect8.shape) * 1.0e-5
     assert np.all(diff8 < error8)
+
+
+class ReduceMinDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceMinDynamic, self).__init__()
+        self.reducemin = P.ReduceMin(False)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reducemin(x, axis)
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_min_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceMinDynamic()
+
+    x_1 = x8
+    axis_1 = 0
+    expect_1 = np.min(x_1, axis=0, keepdims=False)
+
+    x_2 = x1
+    axis_2 = 0
+    expect_2 = np.min(x_2, axis=0, keepdims=False)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)
diff --git a/tests/st/ops/gpu/test_reduce_sum_op.py b/tests/st/ops/gpu/test_reduce_sum_op.py
index f80c421cbe..d89216969e 100644
--- a/tests/st/ops/gpu/test_reduce_sum_op.py
+++ b/tests/st/ops/gpu/test_reduce_sum_op.py
@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner

 x0 = np.random.rand(2, 3, 4, 4).astype(np.float32)
 axis0 = 3
@@ -267,3 +268,36 @@ def test_ReduceSum():
     error14 = np.ones(shape=expect14.shape) * 1.0e-5
     assert np.all(diff14 < error14)
     assert output[14].shape == expect14.shape
+
+
+class ReduceSumDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceSumDynamic, self).__init__()
+        self.reducesum = P.ReduceSum(True)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reducesum(x, axis)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_sum_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceSumDynamic()
+
+    x_1 = x8
+    axis_1 = 0
+    expect_1 = np.sum(x_1, axis=axis_1, keepdims=True)
+
+    x_2 = x1
+    axis_2 = 0
+    expect_2 = np.sum(x_2, axis=axis_2, keepdims=True)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)
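Assuming a GPU environment configured for MindSpore's st suite, the new dynamic-shape cases can be run individually with pytest, e.g.:

pytest -v tests/st/ops/gpu/test_reduce_max_op.py::test_reduce_max_dynamic
pytest -v tests/st/ops/gpu/test_reduce_sum_op.py::test_reduce_sum_dynamic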