@@ -38,10 +38,10 @@ void AicpuMetadataInfo(const CNodePtr &kernel_node, std::vector<std::shared_ptr<
     return;
   }
   // For compatibility with the current framework
-  if (op_name == kPrint || op_name == kGetNext) {
+  if (op_name == kPrint || op_name == kGetNext || op_name == kPack) {
     std::vector<std::string> inputs_format{};
     std::vector<TypeId> inputs_type{};
-    if (op_name == kPrint) {
+    if (op_name == kPrint || op_name == kPack) {
       for (size_t input_index = 0; input_index < AnfAlgo::GetInputTensorNum(kernel_node); ++input_index) {
         inputs_format.emplace_back(kOpFormat_DEFAULT);
         inputs_type.push_back(AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, input_index));
| @@ -28,6 +28,7 @@ constexpr auto kInitDataSetQueue = "InitDataSetQueue"; | |||||
| constexpr auto kInitData = "InitData"; | constexpr auto kInitData = "InitData"; | ||||
| constexpr auto kGetNext = "GetNext"; | constexpr auto kGetNext = "GetNext"; | ||||
| constexpr auto kPrint = "Print"; | constexpr auto kPrint = "Print"; | ||||
| constexpr auto kPack = "Pack"; | |||||
| constexpr auto kOutputTypes = "output_types"; | constexpr auto kOutputTypes = "output_types"; | ||||
| constexpr auto kOutputShapes = "output_shapes"; | constexpr auto kOutputShapes = "output_shapes"; | ||||
| constexpr auto kChannelName = "channel_name"; | constexpr auto kChannelName = "channel_name"; | ||||
| @@ -24,3 +24,4 @@ from .flatten import _flatten_aicpu | |||||
| from .squeeze import _squeeze_aicpu | from .squeeze import _squeeze_aicpu | ||||
| from .expand_dims import _expand_dims_aicpu | from .expand_dims import _expand_dims_aicpu | ||||
| from .random_choice_with_mask import _random_choice_with_mask_aicpu | from .random_choice_with_mask import _random_choice_with_mask_aicpu | ||||
| from .pack import _pack_aicpu | |||||
| @@ -0,0 +1,41 @@ | |||||
| # Copyright 2020 Huawei Technologies Co., Ltd | |||||
| # | |||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| # you may not use this file except in compliance with the License. | |||||
| # You may obtain a copy of the License at | |||||
| # | |||||
| # http://www.apache.org/licenses/LICENSE-2.0 | |||||
| # | |||||
| # Unless required by applicable law or agreed to in writing, software | |||||
| # distributed under the License is distributed on an "AS IS" BASIS, | |||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| # See the License for the specific language governing permissions and | |||||
| # limitations under the License. | |||||
| # ============================================================================ | |||||
| """Pack op""" | |||||
| from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType | |||||
| pack_op_info = AiCPURegOp("Pack") \ | |||||
| .fusion_type("OPAQUE") \ | |||||
| .attr("axis", "int") \ | |||||
| .input(0, "x", "dynamic") \ | |||||
| .output(0, "y", "required") \ | |||||
| .dtype_format(DataType.I8_Default, DataType.I8_Default) \ | |||||
| .dtype_format(DataType.I16_Default, DataType.I16_Default) \ | |||||
| .dtype_format(DataType.I32_Default, DataType.I32_Default) \ | |||||
| .dtype_format(DataType.I64_Default, DataType.I64_Default) \ | |||||
| .dtype_format(DataType.U8_Default, DataType.U8_Default) \ | |||||
| .dtype_format(DataType.U16_Default, DataType.U16_Default) \ | |||||
| .dtype_format(DataType.U32_Default, DataType.U32_Default) \ | |||||
| .dtype_format(DataType.U64_Default, DataType.U64_Default) \ | |||||
| .dtype_format(DataType.F16_Default, DataType.F16_Default) \ | |||||
| .dtype_format(DataType.F32_Default, DataType.F32_Default) \ | |||||
| .dtype_format(DataType.F64_Default, DataType.F64_Default) \ | |||||
| .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ | |||||
| .get_op_info() | |||||
| @op_info_register(pack_op_info) | |||||
| def _pack_aicpu(): | |||||
| """Pack AiCPU register""" | |||||
| return | |||||
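For reference, the newly registered kernel backs the existing `P.Pack` primitive, which stacks a tuple of same-shaped tensors along `axis` in the same way as `np.stack` (the tests later in this diff check exactly that). A minimal usage sketch, with shapes and values chosen purely for illustration:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import operations as P

    pack = P.Pack(axis=0)
    a = Tensor(np.ones((2, 3), np.float32))
    b = Tensor(np.zeros((2, 3), np.float32))
    out = pack((a, b))  # shape (2, 2, 3), same result as np.stack([a, b], axis=0)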
| @@ -24,6 +24,7 @@ top_k_op_info = AiCPURegOp("TopK") \ | |||||
| .output(0, "values", "required") \ | .output(0, "values", "required") \ | ||||
| .output(1, "indices", "required") \ | .output(1, "indices", "required") \ | ||||
| .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default, DataType.I32_Default) \ | .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default, DataType.I32_Default) \ | ||||
| .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default, DataType.I32_Default) \ | |||||
| .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ | .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ | ||||
| .get_op_info() | .get_op_info() | ||||
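The added `F32_Default` line registers a float32 values path for TopK on AiCPU alongside the existing float16 and int32 variants. A minimal sketch exercising it in the same style as the Pack tests below; the shape, the k value, and the Ascend graph-mode context are illustrative assumptions:

    import numpy as np
    import mindspore.context as context
    import mindspore.nn as nn
    from mindspore import Tensor
    from mindspore.ops import operations as P

    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

    class TopKNet(nn.Cell):
        def __init__(self, k):
            super(TopKNet, self).__init__()
            self.topk = P.TopK(sorted=True)
            self.k = k

        def construct(self, x):
            return self.topk(x, self.k)

    x = np.random.randn(8).astype(np.float32)
    values, indices = TopKNet(3)(Tensor(x))
    # values should equal np.sort(x)[::-1][:3]; indices point to those positions in x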
| @@ -0,0 +1,176 @@ | |||||
| # Copyright 2020 Huawei Technologies Co., Ltd | |||||
| # | |||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| # you may not use this file except in compliance with the License. | |||||
| # You may obtain a copy of the License at | |||||
| # | |||||
| # http://www.apache.org/licenses/LICENSE-2.0 | |||||
| # | |||||
| # Unless required by applicable law or agreed to in writing, software | |||||
| # distributed under the License is distributed on an "AS IS" BASIS, | |||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| # See the License for the specific language governing permissions and | |||||
| # limitations under the License. | |||||
| # ============================================================================ | |||||
| import numpy as np | |||||
| import mindspore.context as context | |||||
| import mindspore.nn as nn | |||||
| from mindspore import Tensor | |||||
| from mindspore.ops import operations as P | |||||
| context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | |||||
| class Net(nn.Cell): | |||||
| def __init__(self, x, axis): | |||||
| super(Net, self).__init__() | |||||
| self.pack = P.Pack(axis) | |||||
| self.x = x | |||||
| def construct(self): | |||||
| return self.pack(self.x) | |||||
| def test_net_bool(): | |||||
| x = np.random.randn(3, 5, 4) > 0 | |||||
| y = np.random.randn(3, 5, 4) > 0 | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_int8(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.int8) | |||||
| y = np.random.randn(3, 5, 4).astype(np.int8) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_uint8(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.uint8) | |||||
| y = np.random.randn(3, 5, 4).astype(np.uint8) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_int16(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.int16) | |||||
| y = np.random.randn(3, 5, 4).astype(np.int16) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_uint16(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.uint16) | |||||
| y = np.random.randn(3, 5, 4).astype(np.uint16) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_int32(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.int32) | |||||
| y = np.random.randn(3, 5, 4).astype(np.int32) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_uint32(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.uint32) | |||||
| y = np.random.randn(3, 5, 4).astype(np.uint32) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_int64(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.int64) | |||||
| y = np.random.randn(3, 5, 4).astype(np.int64) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_uint64(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.uint64) | |||||
| y = np.random.randn(3, 5, 4).astype(np.uint64) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_float16(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.float16) | |||||
| y = np.random.randn(3, 5, 4).astype(np.float16) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_float32(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.float32) | |||||
| y = np.random.randn(3, 5, 4).astype(np.float32) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
| def test_net_float64(): | |||||
| x = np.random.randn(3, 5, 4).astype(np.float64) | |||||
| y = np.random.randn(3, 5, 4).astype(np.float64) | |||||
| axis = -1 | |||||
| net = Net((Tensor(x), Tensor(y)), axis) | |||||
| output = net() | |||||
| print(x) | |||||
| print(y) | |||||
| print(output.asnumpy()) | |||||
| assert np.array_equal(output.asnumpy(), np.stack([x, y], axis)) | |||||
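The per-dtype tests above all repeat one pattern; assuming pytest is available in this test environment (it is not shown in the diff), the numeric cases could be expressed once with parametrization, with the bool case keeping its `> 0` construction:

    import pytest

    @pytest.mark.parametrize("dtype", [np.int8, np.uint8, np.int16, np.uint16,
                                       np.int32, np.uint32, np.int64, np.uint64,
                                       np.float16, np.float32, np.float64])
    def test_net_parametrized(dtype):
        x = np.random.randn(3, 5, 4).astype(dtype)
        y = np.random.randn(3, 5, 4).astype(dtype)
        axis = -1
        output = Net((Tensor(x), Tensor(y)), axis)()
        assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))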