From c713382798faaab5a29d005741c6b61a81bd27cd Mon Sep 17 00:00:00 2001 From: wuxuejian Date: Tue, 7 Jul 2020 20:15:16 +0800 Subject: [PATCH] add Padding op --- mindspore/ops/_op_impl/aicpu/__init__.py | 1 + mindspore/ops/_op_impl/aicpu/padding.py | 41 +++++++++++++++++++ mindspore/ops/operations/__init__.py | 3 +- mindspore/ops/operations/array_ops.py | 40 ++++++++++++++++++ tests/st/ops/ascend/test_drop_out_gen_mask.py | 2 +- tests/st/ops/ascend/test_padding.py | 40 ++++++++++++++++++ 6 files changed, 125 insertions(+), 2 deletions(-) create mode 100644 mindspore/ops/_op_impl/aicpu/padding.py create mode 100644 tests/st/ops/ascend/test_padding.py diff --git a/mindspore/ops/_op_impl/aicpu/__init__.py b/mindspore/ops/_op_impl/aicpu/__init__.py index 8eb08aea2f..5551dc58b4 100644 --- a/mindspore/ops/_op_impl/aicpu/__init__.py +++ b/mindspore/ops/_op_impl/aicpu/__init__.py @@ -15,6 +15,7 @@ """aicpu ops""" from .init_data_set_queue import _init_data_set_queue_aicpu from .embedding_lookup import _embedding_lookup_aicpu +from .padding import _padding_aicpu from .dropout_genmask import _dropout_genmask_aicpu from .get_next import _get_next_aicpu from .print_tensor import _print_aicpu diff --git a/mindspore/ops/_op_impl/aicpu/padding.py b/mindspore/ops/_op_impl/aicpu/padding.py new file mode 100644 index 0000000000..4a67376fbd --- /dev/null +++ b/mindspore/ops/_op_impl/aicpu/padding.py @@ -0,0 +1,41 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
"""Padding op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

# Every supported dtype maps to itself: the op never changes element type,
# and all formats are Default.
_PADDING_DTYPES = (
    DataType.I8_Default, DataType.I16_Default, DataType.I32_Default,
    DataType.I64_Default, DataType.U8_Default, DataType.U16_Default,
    DataType.U32_Default, DataType.U64_Default, DataType.F16_Default,
    DataType.F32_Default, DataType.F64_Default, DataType.BOOL_Default,
)

_reg = AiCPURegOp("Padding") \
    .fusion_type("OPAQUE") \
    .input(0, "x", "required") \
    .output(0, "y", "required") \
    .attr("pad_dim_size", "int")
for _dtype in _PADDING_DTYPES:
    _reg = _reg.dtype_format(_dtype, _dtype)
padding_op_info = _reg.get_op_info()


@op_info_register(padding_op_info)
def _padding_aicpu():
    """Register the Padding op for the AiCPU backend."""
    return
class Padding(PrimitiveWithInfer):
    """
    Extends the last dimension of the input tensor from 1 to pad_dim_size, filling
    the new positions with 0.

    Args:
        pad_dim_size (int): Target size of the last dimension of the output;
            must be a positive int. Default: 8.

    Inputs:
        - **x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
          The rank of x must be at least 2, and its last dimension must be 1.

    Outputs:
        Tensor, with the same dtype as x and shape
        :math:`(x_1, ..., x_{R-1}, pad\_dim\_size)`.

    Examples:
        >>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
        >>> pad_dim_size = 4
        >>> out = P.Padding(pad_dim_size)(x)
        [[8, 0, 0, 0], [10, 0, 0, 0]]
    """

    @prim_attr_register
    def __init__(self, pad_dim_size=8):
        """Initialize Padding and validate that pad_dim_size is a positive int."""
        validator.check_value_type("pad_dim_size", pad_dim_size, [int], self.name)
        validator.check_integer("pad_dim_size", pad_dim_size, 0, Rel.GT, self.name)
        self.pad_dim_size = pad_dim_size

    def __infer__(self, x):
        # Input must be a tensor of rank >= 2 whose last dimension is exactly 1.
        validator.check_subclass("x", x['dtype'], mstype.tensor, self.name)
        shape = list(x['shape'])
        validator.check_integer("rank of x", len(shape), 1, Rel.GT, self.name)
        validator.check_integer("last dim of x", shape[-1], 1, Rel.EQ, self.name)
        # All leading dimensions are preserved; only the last grows to pad_dim_size.
        return {'shape': shape[:-1] + [self.pad_dim_size],
                'dtype': x['dtype'],
                'value': None}
import numpy as np

import mindspore.context as context
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE,
                    device_target="Ascend")


class Net(nn.Cell):
    """Wraps P.Padding so it can run as a graph-mode cell."""

    def __init__(self, pad_dim_size):
        super(Net, self).__init__()
        self.padding = P.Padding(pad_dim_size)

    def construct(self, x):
        return self.padding(x)


def test_padding():
    """Padding must grow the last dim from 1 to 4, filling new slots with 0."""
    input_np = np.array([[8], [10]])
    net = Net(4)
    result = net(Tensor(input_np, mstype.int32))
    expected = np.array([[8, 0, 0, 0], [10, 0, 0, 0]])
    assert (result.asnumpy() == expected).all()