diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc index e8636ffa2e..3670a2d76f 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc @@ -38,10 +38,10 @@ void AicpuMetadataInfo(const CNodePtr &kernel_node, std::vector<std::string> inputs_format{}; std::vector<TypeId> inputs_type{}; - if (op_name == kPrint) { + if (op_name == kPrint || op_name == kPack) { for (size_t input_index = 0; input_index < AnfAlgo::GetInputTensorNum(kernel_node); ++input_index) { inputs_format.emplace_back(kOpFormat_DEFAULT); inputs_type.push_back(AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, input_index)); diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_util.h b/mindspore/ccsrc/kernel/aicpu/aicpu_util.h index b6f43414e3..50f7b36d94 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_util.h +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_util.h @@ -28,6 +28,7 @@ constexpr auto kInitDataSetQueue = "InitDataSetQueue"; constexpr auto kInitData = "InitData"; constexpr auto kGetNext = "GetNext"; constexpr auto kPrint = "Print"; +constexpr auto kPack = "Pack"; constexpr auto kOutputTypes = "output_types"; constexpr auto kOutputShapes = "output_shapes"; diff --git a/mindspore/ops/_op_impl/aicpu/__init__.py b/mindspore/ops/_op_impl/aicpu/__init__.py index 58db081de3..7d90d72b88 100644 --- a/mindspore/ops/_op_impl/aicpu/__init__.py +++ b/mindspore/ops/_op_impl/aicpu/__init__.py @@ -28,3 +28,4 @@ from .ctcloss import _ctcloss_aicpu from .rnnt_loss import _rnnt_loss_aicpu from .random_categorical import _random_categorical_aicpu from .reverse_sequence import _reverse_sequence_aicpu +from .pack import _pack_aicpu diff --git a/mindspore/ops/_op_impl/aicpu/pack.py b/mindspore/ops/_op_impl/aicpu/pack.py new file mode 100644 index 0000000000..179651d884 --- /dev/null +++ b/mindspore/ops/_op_impl/aicpu/pack.py @@ -0,0 +1,41 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Pack op""" +from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType + +pack_op_info = AiCPURegOp("Pack") \ + .fusion_type("OPAQUE") \ + .attr("axis", "int") \ + .input(0, "x", "dynamic") \ + .output(0, "y", "required") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I16_Default, DataType.I16_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U16_Default, DataType.U16_Default) \ + .dtype_format(DataType.U32_Default, DataType.U32_Default) \ + .dtype_format(DataType.U64_Default, DataType.U64_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F64_Default, DataType.F64_Default) \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ + .get_op_info() + +@op_info_register(pack_op_info) +def _pack_aicpu(): + """Pack AiCPU register""" + return diff --git a/tests/st/ops/ascend/test_aicpu_ops/test_pack.py b/tests/st/ops/ascend/test_aicpu_ops/test_pack.py new file mode 100644 index 0000000000..affb9b90ef --- /dev/null +++ b/tests/st/ops/ascend/test_aicpu_ops/test_pack.py @@ -0,0 +1,176 @@ +# Copyright 2020 Huawei 
Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import numpy as np + +import mindspore.context as context +import mindspore.nn as nn +from mindspore import Tensor +from mindspore.ops import operations as P + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +class Net(nn.Cell): + def __init__(self, x, axis): + super(Net, self).__init__() + self.pack = P.Pack(axis) + self.x = x + + def construct(self): + return self.pack(self.x) + + +def test_net_bool(): + x = np.random.randn(3, 5, 4)>0 + y = np.random.randn(3, 5, 4)>0 + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_int8(): + x = np.random.randn(3, 5, 4).astype(np.int8) + y = np.random.randn(3, 5, 4).astype(np.int8) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_uint8(): + x = np.random.randn(3, 5, 4).astype(np.uint8) + y = np.random.randn(3, 5, 4).astype(np.uint8) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_int16(): + x = 
np.random.randn(3, 5, 4).astype(np.int16) + y = np.random.randn(3, 5, 4).astype(np.int16) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_uint16(): + x = np.random.randn(3, 5, 4).astype(np.uint16) + y = np.random.randn(3, 5, 4).astype(np.uint16) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_int32(): + x = np.random.randn(3, 5, 4).astype(np.int32) + y = np.random.randn(3, 5, 4).astype(np.int32) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_uint32(): + x = np.random.randn(3, 5, 4).astype(np.uint32) + y = np.random.randn(3, 5, 4).astype(np.uint32) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_int64(): + x = np.random.randn(3, 5, 4).astype(np.int64) + y = np.random.randn(3, 5, 4).astype(np.int64) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_uint64(): + x = np.random.randn(3, 5, 4).astype(np.uint64) + y = np.random.randn(3, 5, 4).astype(np.uint64) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_float16(): + x = np.random.randn(3, 5, 4).astype(np.float16) + y = np.random.randn(3, 5, 4).astype(np.float16) + axis = -1 + net = Net((Tensor(x), Tensor(y)), 
axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_float32(): + x = np.random.randn(3, 5, 4).astype(np.float32) + y = np.random.randn(3, 5, 4).astype(np.float32) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis)) + + +def test_net_float64(): + x = np.random.randn(3, 5, 4).astype(np.float64) + y = np.random.randn(3, 5, 4).astype(np.float64) + axis = -1 + net = Net((Tensor(x), Tensor(y)), axis) + output = net() + print(x) + print(y) + print(output.asnumpy()) + assert np.array_equal(output.asnumpy(), np.stack([x,y], axis))