
Add test cases and dynamic shape support to reduce ops

tags/v1.2.0-rc1
TFBunny committed 4 years ago · parent commit 6cd7dc42e9
9 changed files with 198 additions and 21 deletions
1. mindspore/ccsrc/utils/utils.h (+2 -2)
2. mindspore/core/abstract/prim_maths.cc (+2 -1)
3. mindspore/core/abstract/primitive_infer_map.cc (+4 -0)
4. tests/st/ops/gpu/test_reduce_all_op.py (+38 -4)
5. tests/st/ops/gpu/test_reduce_any_op.py (+42 -8)
6. tests/st/ops/gpu/test_reduce_max_op.py (+34 -0)
7. tests/st/ops/gpu/test_reduce_mean_op.py (+8 -6)
8. tests/st/ops/gpu/test_reduce_min_op.py (+34 -0)
9. tests/st/ops/gpu/test_reduce_sum_op.py (+34 -0)

mindspore/ccsrc/utils/utils.h (+2 -2)

@@ -497,8 +497,8 @@ const std::set<std::string> kComputeDepend = {kUniqueOpName, kComputeAccidentalH
 const std::set<std::string> k3DFormatSet = {kOpFormat_NCDHW, kOpFormat_NDC1HWC0, kOpFormat_FRACTAL_Z_3D};
 
 const std::set<std::string> DynamicShapeConstInputToAttr = {
-  kCastOpName, kExpandDimsOpName, kReshapeOpName, kEmbeddingLookupOpName,
-  kTransposeOpName, kReduceSumOpName, kConcatOpName};
+  kCastOpName, kExpandDimsOpName, kReshapeOpName, kEmbeddingLookupOpName, kTransposeOpName, kReduceSumOpName,
+  kReduceMinOpName, kReduceMeanOpName, kReduceMaxOpName, kReduceAllOpName, kReduceAnyOpName, kConcatOpName};
 
 static inline void ChangeFileMode(const std::string &file_name, mode_t mode) {
   try {
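
DynamicShapeConstInputToAttr presumably lists the ops whose constant inputs (here, the reduction axis) are still folded into an attribute when the graph contains dynamic shapes; with the five extra reduce ops registered, their axis can be supplied as a runtime input, which is the calling pattern the new tests below rely on. A minimal sketch of that pattern, assuming a GPU build of MindSpore (not part of this commit):

    import numpy as np
    import mindspore.context as context
    from mindspore import Tensor
    from mindspore.ops import operations as P

    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")

    # keep_dims is an attribute fixed at construction; the axis is passed as
    # a second (constant) input, which the backend may fold into an attribute.
    op = P.ReduceMax(keep_dims=False)
    x = Tensor(np.random.rand(2, 3, 4, 4).astype(np.float32))
    print(op(x, 0).shape)  # (3, 4, 4)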


mindspore/core/abstract/prim_maths.cc (+2 -1)

@@ -121,7 +121,8 @@ AbstractBasePtr InferImplEqual(const AnalysisEnginePtr &, const PrimitivePtr &pr
   return ret;
 }
 
-// To reduce code repeat, use InferImplReduceFunc. Currently registered with ReduceMean, ReduceSum.
+// To reduce code repeat, use InferImplReduceFunc. Currently registered with ReduceMean, ReduceSum,
+// ReduceAll, ReduceAny, ReduceMax, ReduceMin.
 AbstractBasePtr InferImplReduceFunc(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                     const AbstractBasePtrList &args_spec_list) {
   const std::string op_name = primitive->name();

mindspore/core/abstract/primitive_infer_map.cc (+4 -0)

@@ -46,6 +46,10 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
     {prim::kPrimEqual, {InferImplEqual, true}},
     {prim::kPrimReduceSum, {InferImplReduceFunc, true}},
     {prim::kPrimReduceMean, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceAll, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceAny, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceMax, {InferImplReduceFunc, true}},
+    {prim::kPrimReduceMin, {InferImplReduceFunc, true}},
     {prim::kPrimMinimum, {InferImplMinimum, true}},
     {prim::kPrimDivNoNan, {InferImplDivNoNan, true}},
     {prim::kPrimLinSpace, {InferImplLinSpace, true}},
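
All six reduce primitives now share InferImplReduceFunc. Its C++ body is not part of this diff, but the shape rule any such infer function has to implement is the standard reduction rule; a rough Python equivalent (illustrative only, not the MindSpore source):

    def reduce_infer_shape(x_shape, axis, keep_dims):
        """Standard reduction shape rule: reduced axes become 1 when
        keep_dims is True, and are dropped otherwise."""
        axes = (axis,) if isinstance(axis, int) else tuple(axis)
        axes = {a % len(x_shape) for a in axes}  # normalize negative axes
        if keep_dims:
            return tuple(1 if i in axes else d for i, d in enumerate(x_shape))
        return tuple(d for i, d in enumerate(x_shape) if i not in axes)

    assert reduce_infer_shape((2, 3, 4, 4), 0, False) == (3, 4, 4)
    assert reduce_infer_shape((2, 3, 4, 4), (1, -1), True) == (2, 1, 4, 1)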


tests/st/ops/gpu/test_reduce_all_op.py (+38 -4)

@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
 
 x0 = np.array([[True, True], [True, False], [False, False]])
 axis0 = 0
@@ -78,17 +79,50 @@ def test_ReduceAll():
     output = reduce_all()
 
     expect0 = np.all(x0, axis=axis0, keepdims=keep_dims0)
-    np.allclose(output[0].asnumpy(), expect0)
+    assert np.allclose(output[0].asnumpy(), expect0)
     assert output[0].shape == expect0.shape
 
     expect1 = np.all(x1, axis=axis1, keepdims=keep_dims1)
-    np.allclose(output[1].asnumpy(), expect1)
+    assert np.allclose(output[1].asnumpy(), expect1)
     assert output[1].shape == expect1.shape
 
     expect2 = np.all(x2, axis=axis2, keepdims=keep_dims2)
-    np.allclose(output[2].asnumpy(), expect2)
+    assert np.allclose(output[2].asnumpy(), expect2)
     assert output[2].shape == expect2.shape
 
     expect3 = np.all(x3, axis=axis3, keepdims=keep_dims3)
-    np.allclose(output[3].asnumpy(), expect3)
+    assert np.allclose(output[3].asnumpy(), expect3)
     assert output[3].shape == expect3.shape
+
+
+class ReduceAllDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceAllDynamic, self).__init__()
+        self.reduceall = P.ReduceAll(False)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reduceall(x, axis)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_all_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceAllDynamic()
+
+    x_1 = np.array([[True, True], [True, False], [False, False]])
+    axis_1 = 0
+    expect_1 = np.all(x_1, axis=axis_1, keepdims=False)
+
+    x_2 = np.array([[True, True], [True, True], [True, False], [False, False]])
+    axis_2 = 0
+    expect_2 = np.all(x_2, axis=axis_2, keepdims=False)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)
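
The five test files that follow repeat this pattern: route the input through inner.GpuConvertToDynamicShape so the compiled graph sees dynamic dimensions, then push two differently shaped inputs through a single net instance so the second call exercises the dynamic-shape kernel. A condensed, self-contained sketch of the idea (GPU build assumed; the helper name is illustrative, not from the diff):

    import numpy as np
    import mindspore.context as context
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.ops import operations as P
    from mindspore.ops.operations import _inner_ops as inner

    class DynReduce(nn.Cell):  # hypothetical condensed helper
        def __init__(self, op):
            super(DynReduce, self).__init__()
            self.op = op
            self.to_dynamic = inner.GpuConvertToDynamicShape()

        def construct(self, x, axis):
            return self.op(self.to_dynamic(x), axis)

    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    net = DynReduce(P.ReduceAll(False))
    for rows in (3, 4):  # two shapes through one net => dynamic-shape path
        x = np.ones((rows, 2)).astype(bool)
        assert (net(Tensor(x), 0).asnumpy() == np.all(x, axis=0)).all()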

tests/st/ops/gpu/test_reduce_any_op.py (+42 -8)

@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
 
 x0 = np.array([[True, True], [True, False], [False, False]])
 axis0 = 0
@@ -77,18 +78,51 @@ def test_ReduceAny():
     reduce_any = ReduceAny()
     output = reduce_any()
 
-    expect0 = np.all(x0, axis=axis0, keepdims=keep_dims0)
-    np.allclose(output[0].asnumpy(), expect0)
+    expect0 = np.any(x0, axis=axis0, keepdims=keep_dims0)
+    assert np.allclose(output[0].asnumpy(), expect0)
     assert output[0].shape == expect0.shape
 
-    expect1 = np.all(x1, axis=axis1, keepdims=keep_dims1)
-    np.allclose(output[1].asnumpy(), expect1)
+    expect1 = np.any(x1, axis=axis1, keepdims=keep_dims1)
+    assert np.allclose(output[1].asnumpy(), expect1)
     assert output[1].shape == expect1.shape
 
-    expect2 = np.all(x2, axis=axis2, keepdims=keep_dims2)
-    np.allclose(output[2].asnumpy(), expect2)
+    expect2 = np.any(x2, axis=axis2, keepdims=keep_dims2)
+    assert np.allclose(output[2].asnumpy(), expect2)
     assert output[2].shape == expect2.shape
 
-    expect3 = np.all(x3, axis=axis3, keepdims=keep_dims3)
-    np.allclose(output[3].asnumpy(), expect3)
+    expect3 = np.any(x3, axis=axis3, keepdims=keep_dims3)
+    assert np.allclose(output[3].asnumpy(), expect3)
     assert output[3].shape == expect3.shape
+
+
+class ReduceAnyDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceAnyDynamic, self).__init__()
+        self.reduceany = P.ReduceAny(False)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reduceany(x, axis)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_any_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceAnyDynamic()
+
+    x_1 = np.array([[True, True], [True, False], [False, False]])
+    axis_1 = 0
+    expect_1 = np.any(x_1, axis=axis_1, keepdims=False)
+
+    x_2 = np.array([[True, True], [True, True], [True, False], [False, False]])
+    axis_2 = 0
+    expect_2 = np.any(x_2, axis=axis_2, keepdims=False)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)

tests/st/ops/gpu/test_reduce_max_op.py (+34 -0)

@@ -21,6 +21,8 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
+
 
 x0 = np.random.rand(2, 3, 4, 4).astype(np.float32)
 axis0 = 3
@@ -175,3 +177,35 @@ def test_ReduceMax():
     diff8 = abs(output[8].asnumpy() - expect8)
     error8 = np.ones(shape=expect8.shape) * 1.0e-5
     assert np.all(diff8 < error8)
+
+
+class ReduceMaxDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceMaxDynamic, self).__init__()
+        self.reducemax = P.ReduceMax(False)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reducemax(x, axis)
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_max_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceMaxDynamic()
+
+    x_1 = x8
+    axis_1 = 0
+    expect_1 = np.max(x_1, axis=0, keepdims=False)
+
+    x_2 = x1
+    axis_2 = 0
+    expect_2 = np.max(x_2, axis=0, keepdims=False)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)

tests/st/ops/gpu/test_reduce_mean_op.py (+8 -6)

@@ -267,9 +267,9 @@ def test_ReduceMean():
     assert np.all(diff14 < error14)
     assert output[14].shape == expect14.shape
 
-class ReduceMean_Dynamic(nn.Cell):
+class ReduceMeanDynamic(nn.Cell):
     def __init__(self, keepdims=False):
-        super(ReduceMean_Dynamic, self).__init__()
+        super(ReduceMeanDynamic, self).__init__()
         self.test_dynamic = inner.GpuConvertToDynamicShape()
         self.reducemean = P.ReduceMean(keep_dims=keepdims)
 
@@ -281,8 +281,9 @@ class ReduceMean_Dynamic(nn.Cell):
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
-def test_dynamic_reducemean_keepdims_true():
-    net = ReduceMean_Dynamic(keepdims=True)
+def test_dynamic_reduce_mean_keepdims_true():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceMeanDynamic(keepdims=True)
     x_tensor_1 = Tensor(x14)
     output_1 = net(x_tensor_1, axis14)
     x_tensor_2 = Tensor(x0)
@@ -303,8 +304,9 @@ def test_dynamic_reducemean_keepdims_true():
 @pytest.mark.level0
 @pytest.mark.platform_x86_gpu_training
 @pytest.mark.env_onecard
-def test_dynamic_reducemean_keepdims_false():
-    net = ReduceMean_Dynamic(keepdims=False)
+def test_dynamic_reduce_mean_keepdims_false():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceMeanDynamic(keepdims=False)
     x_tensor = Tensor(x12)
     output = net(x_tensor, axis12)




tests/st/ops/gpu/test_reduce_min_op.py (+34 -0)

@@ -21,6 +21,8 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
+
 
 x0 = np.random.rand(2, 3, 4, 4).astype(np.float32)
 axis0 = 3
@@ -175,3 +177,35 @@ def test_ReduceMin():
     diff8 = abs(output[8].asnumpy() - expect8)
     error8 = np.ones(shape=expect8.shape) * 1.0e-5
     assert np.all(diff8 < error8)
+
+
+class ReduceMinDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceMinDynamic, self).__init__()
+        self.reducemin = P.ReduceMin(False)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reducemin(x, axis)
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_min_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceMinDynamic()
+
+    x_1 = x8
+    axis_1 = 0
+    expect_1 = np.min(x_1, axis=0, keepdims=False)
+
+    x_2 = x1
+    axis_2 = 0
+    expect_2 = np.min(x_2, axis=0, keepdims=False)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)

tests/st/ops/gpu/test_reduce_sum_op.py (+34 -0)

@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _inner_ops as inner
 
 x0 = np.random.rand(2, 3, 4, 4).astype(np.float32)
 axis0 = 3
@@ -267,3 +268,36 @@ def test_ReduceSum():
     error14 = np.ones(shape=expect14.shape) * 1.0e-5
     assert np.all(diff14 < error14)
     assert output[14].shape == expect14.shape
+
+
+class ReduceSumDynamic(nn.Cell):
+    def __init__(self):
+        super(ReduceSumDynamic, self).__init__()
+        self.reducesum = P.ReduceSum(True)
+        self.test_dynamic = inner.GpuConvertToDynamicShape()
+
+    def construct(self, x, axis):
+        x = self.test_dynamic(x)
+        return self.reducesum(x, axis)
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_reduce_sum_dynamic():
+    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+    net = ReduceSumDynamic()
+
+    x_1 = x8
+    axis_1 = 0
+    expect_1 = np.sum(x_1, axis=axis_1, keepdims=True)
+
+    x_2 = x1
+    axis_2 = 0
+    expect_2 = np.sum(x_2, axis=axis_2, keepdims=True)
+
+    output_1 = net(Tensor(x_1), axis_1)
+    output_2 = net(Tensor(x_2), axis_2)
+
+    np.testing.assert_almost_equal(output_1.asnumpy(), expect_1)
+    np.testing.assert_almost_equal(output_2.asnumpy(), expect_2)
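
One detail worth noting: np.testing.assert_almost_equal also accepts the boolean outputs of the ReduceAll/ReduceAny tests (booleans compare as 0/1), so the same assertion helper works across all six files. A quick check:

    import numpy as np
    # Passes: boolean arrays are compared elementwise as numbers.
    np.testing.assert_almost_equal(np.array([True, False]), np.array([True, False]))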
