
!196 add Pack op for aicpu when axis=-1

Merge pull request !196 from yanzhenxiang2020/br_pack_incubator
Tag: v0.6.0-beta
Committed by mindspore-ci-bot, 5 years ago
Commit: 73894e6cfd
5 changed files with 221 additions and 2 deletions:

1. mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc (+2, -2)
2. mindspore/ccsrc/kernel/aicpu/aicpu_util.h (+1, -0)
3. mindspore/ops/_op_impl/aicpu/__init__.py (+1, -0)
4. mindspore/ops/_op_impl/aicpu/pack.py (+41, -0)
5. tests/st/ops/ascend/test_aicpu_ops/test_pack.py (+176, -0)

mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc (+2, -2)

@@ -38,10 +38,10 @@ void AicpuMetadataInfo(const CNodePtr &kernel_node, std::vector<std::shared_ptr<
     return;
   }
   // For compatibility with the current framework
-  if (op_name == kPrint || op_name == kGetNext) {
+  if (op_name == kPrint || op_name == kGetNext || op_name == kPack) {
     std::vector<std::string> inputs_format{};
     std::vector<TypeId> inputs_type{};
-    if (op_name == kPrint) {
+    if (op_name == kPrint || op_name == kPack) {
       for (size_t input_index = 0; input_index < AnfAlgo::GetInputTensorNum(kernel_node); ++input_index) {
         inputs_format.emplace_back(kOpFormat_DEFAULT);
         inputs_type.push_back(AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, input_index));
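In short, Pack now takes the same metadata path as Print: because its input count is dynamic, the kernel info is assembled per real input, pairing the default format with each input's inferred dtype. A rough Python paraphrase of that branch, for illustration only (get_input_num and get_input_dtype are hypothetical stand-ins for the AnfAlgo calls above):

# Rough paraphrase of the C++ branch above; not actual framework code.
def aicpu_metadata_info(op_name, get_input_num, get_input_dtype):
    inputs_format = []
    inputs_type = []
    if op_name in ("Print", "GetNext", "Pack"):
        if op_name in ("Print", "Pack"):
            # Pack has a dynamic number of inputs, so formats/dtypes are
            # collected per input rather than taken from a fixed op template.
            for i in range(get_input_num()):
                inputs_format.append("DefaultFormat")
                inputs_type.append(get_input_dtype(i))
    return inputs_format, inputs_type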


mindspore/ccsrc/kernel/aicpu/aicpu_util.h (+1, -0)

@@ -28,6 +28,7 @@ constexpr auto kInitDataSetQueue = "InitDataSetQueue";
 constexpr auto kInitData = "InitData";
 constexpr auto kGetNext = "GetNext";
 constexpr auto kPrint = "Print";
+constexpr auto kPack = "Pack";

 constexpr auto kOutputTypes = "output_types";
 constexpr auto kOutputShapes = "output_shapes";


mindspore/ops/_op_impl/aicpu/__init__.py (+1, -0)

@@ -28,3 +28,4 @@ from .ctcloss import _ctcloss_aicpu
 from .rnnt_loss import _rnnt_loss_aicpu
 from .random_categorical import _random_categorical_aicpu
 from .reverse_sequence import _reverse_sequence_aicpu
+from .pack import _pack_aicpu

mindspore/ops/_op_impl/aicpu/pack.py (+41, -0)

@@ -0,0 +1,41 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Pack op"""
from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType

pack_op_info = AiCPURegOp("Pack") \
    .fusion_type("OPAQUE") \
    .attr("axis", "int") \
    .input(0, "x", "dynamic") \
    .output(0, "y", "required") \
    .dtype_format(DataType.I8_Default, DataType.I8_Default) \
    .dtype_format(DataType.I16_Default, DataType.I16_Default) \
    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I64_Default, DataType.I64_Default) \
    .dtype_format(DataType.U8_Default, DataType.U8_Default) \
    .dtype_format(DataType.U16_Default, DataType.U16_Default) \
    .dtype_format(DataType.U32_Default, DataType.U32_Default) \
    .dtype_format(DataType.U64_Default, DataType.U64_Default) \
    .dtype_format(DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.F64_Default, DataType.F64_Default) \
    .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \
    .get_op_info()


@op_info_register(pack_op_info)
def _pack_aicpu():
    """Pack AiCPU register"""
    return
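With this registration in place, the op is reachable through P.Pack. A minimal usage sketch, assuming an Ascend environment where the AICPU kernel registered above gets selected (mirroring the st test below):

# Minimal usage sketch; illustrative only, not part of this PR.
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

class PackNet(nn.Cell):
    def __init__(self, axis):
        super(PackNet, self).__init__()
        self.pack = P.Pack(axis)

    def construct(self, x, y):
        # Pack stacks its inputs along a new axis, like np.stack.
        return self.pack((x, y))

net = PackNet(axis=-1)                      # axis=-1 is the case this PR enables
x = Tensor(np.ones((3, 5, 4), np.float32))
y = Tensor(np.zeros((3, 5, 4), np.float32))
out = net(x, y)                             # expected shape: (3, 5, 4, 2)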

tests/st/ops/ascend/test_aicpu_ops/test_pack.py (+176, -0)

@@ -0,0 +1,176 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np

import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


class Net(nn.Cell):
    def __init__(self, x, axis):
        super(Net, self).__init__()
        self.pack = P.Pack(axis)
        self.x = x

    def construct(self):
        return self.pack(self.x)


def test_net_bool():
    x = np.random.randn(3, 5, 4) > 0
    y = np.random.randn(3, 5, 4) > 0
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_int8():
    x = np.random.randn(3, 5, 4).astype(np.int8)
    y = np.random.randn(3, 5, 4).astype(np.int8)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_uint8():
    x = np.random.randn(3, 5, 4).astype(np.uint8)
    y = np.random.randn(3, 5, 4).astype(np.uint8)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_int16():
    x = np.random.randn(3, 5, 4).astype(np.int16)
    y = np.random.randn(3, 5, 4).astype(np.int16)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_uint16():
    x = np.random.randn(3, 5, 4).astype(np.uint16)
    y = np.random.randn(3, 5, 4).astype(np.uint16)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_int32():
    x = np.random.randn(3, 5, 4).astype(np.int32)
    y = np.random.randn(3, 5, 4).astype(np.int32)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_uint32():
    x = np.random.randn(3, 5, 4).astype(np.uint32)
    y = np.random.randn(3, 5, 4).astype(np.uint32)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_int64():
    x = np.random.randn(3, 5, 4).astype(np.int64)
    y = np.random.randn(3, 5, 4).astype(np.int64)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_uint64():
    x = np.random.randn(3, 5, 4).astype(np.uint64)
    y = np.random.randn(3, 5, 4).astype(np.uint64)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_float16():
    x = np.random.randn(3, 5, 4).astype(np.float16)
    y = np.random.randn(3, 5, 4).astype(np.float16)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_float32():
    x = np.random.randn(3, 5, 4).astype(np.float32)
    y = np.random.randn(3, 5, 4).astype(np.float32)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))


def test_net_float64():
    x = np.random.randn(3, 5, 4).astype(np.float64)
    y = np.random.randn(3, 5, 4).astype(np.float64)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    print(x)
    print(y)
    print(output.asnumpy())
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))
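The twelve tests above differ only in dtype. A more compact equivalent, shown here only as a sketch (not part of this PR, assuming pytest is the st runner and reusing the Net cell defined above), would parametrize one test over the registered dtypes:

# Possible consolidation of the non-bool tests above; illustrative only.
import pytest

@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64,
                                   np.uint8, np.uint16, np.uint32, np.uint64,
                                   np.float16, np.float32, np.float64])
def test_net_pack_dtypes(dtype):
    # Same pattern as the tests above, minus the bool special case.
    x = np.random.randn(3, 5, 4).astype(dtype)
    y = np.random.randn(3, 5, 4).astype(dtype)
    axis = -1
    net = Net((Tensor(x), Tensor(y)), axis)
    output = net()
    assert np.array_equal(output.asnumpy(), np.stack([x, y], axis))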
