Browse Source

add op dynamic shape

dynamic mark

add infer function

fill op dynamic shape

add AddN infer function

bug fix

check AddN input count

format py
tags/v1.1.0
hwjiaorui 5 years ago
parent
commit
ba5ca7ec93
7 changed files with 97 additions and 1 deletions
  1. +2
    -0
      mindspore/core/abstract/infer_functions.h
  2. +9
    -0
      mindspore/core/abstract/prim_maths.cc
  3. +1
    -0
      mindspore/core/abstract/primitive_infer_map.cc
  4. +1
    -1
      mindspore/core/base/core_ops.h
  5. +2
    -0
      mindspore/ops/_op_impl/tbe/__init__.py
  6. +40
    -0
      mindspore/ops/_op_impl/tbe/add_n_ds.py
  7. +42
    -0
      mindspore/ops/_op_impl/tbe/fill_ds.py

+ 2
- 0
mindspore/core/abstract/infer_functions.h View File

@@ -270,6 +270,8 @@ AbstractBasePtr InferImplSplit(const AnalysisEnginePtr &, const PrimitivePtr &pr
AbstractBasePtr InferImplSequenceMask(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);

AbstractBasePtr InferImplAddN(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
const AbstractBasePtrList &args_spec_list);
template <typename T>
AbstractBasePtr InferTupleOrListOrDictLen(const std::string &op_name, const AbstractBasePtrList &args_spec_list) {
// Inputs: a tuple or list or dict.


+ 9
- 0
mindspore/core/abstract/prim_maths.cc View File

@@ -229,5 +229,14 @@ AbstractBasePtr InferImplLinSpace(const AnalysisEnginePtr &, const PrimitivePtr
std::make_shared<AbstractTensor>(start->element(), std::make_shared<Shape>(shape, min_shape, max_shape));
return ret;
}
// Shape/type inference for AddN (elementwise sum of N tensors).
// All inputs are expected to share shape and dtype, so the abstract of the
// first input — broadened to drop its concrete value — describes the output.
// Raises (via MS_LOG(EXCEPTION)) when the input list is empty, and CheckArg
// rejects a first argument that is not an AbstractTensor.
AbstractBasePtr InferImplAddN(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                              const AbstractBasePtrList &args_spec_list) {
  const std::string op_name = primitive->name();
  // Use empty() rather than size() < 1 (readability-container-size-empty).
  if (args_spec_list.empty()) {
    MS_LOG(EXCEPTION) << "AddN operation must have at least one input.";
  }
  auto input = CheckArg<AbstractTensor>(op_name, args_spec_list, 0);
  return input->Broaden();
}
} // namespace abstract
} // namespace mindspore

+ 1
- 0
mindspore/core/abstract/primitive_infer_map.cc View File

@@ -47,6 +47,7 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
{prim::kPrimMinimum, {InferImplMinimum, true}},
{prim::kPrimDivNoNan, {InferImplDivNoNan, true}},
{prim::kPrimLinSpace, {InferImplLinSpace, true}},
{prim::kPrimAddN, {InferImplAddN, true}},
// Array
{prim::kPrimScalarToArray, {InferImplScalarToArray, true}},
{prim::kPrimArrayToScalar, {InferImplArrayToScalar, true}},


+ 1
- 1
mindspore/core/base/core_ops.h View File

@@ -200,7 +200,7 @@ inline const PrimitivePtr kPrimBroadcast = std::make_shared<Primitive>("Broadcas
inline const PrimitivePtr kPrimAllGather = std::make_shared<Primitive>("AllGather");
inline const PrimitivePtr kPrimReduceScatter = std::make_shared<Primitive>("ReduceScatter");
inline const PrimitivePtr kPrimMemCpyAsync = std::make_shared<Primitive>("memcpy_async");
inline const PrimitivePtr kPrimFill = std::make_shared<Primitive>("Fill");
// RowTensor
inline const PrimitivePtr kPrimMakeRowTensor = std::make_shared<Primitive>("MakeRowTensor");
inline const PrimitivePtr kPrimRowTensorGetValues = std::make_shared<Primitive>("RowTensorGetValues");


+ 2
- 0
mindspore/ops/_op_impl/tbe/__init__.py View File

@@ -25,6 +25,7 @@ from .acosh_grad import _acosh_grad_tbe
from .adam_apply_one_with_decay import _adam_apply_one_with_decay_tbe
from .apply_centered_rms_prop import _apply_centered_rms_prop_tbe
from .add_n import _add_n_tbe
from .add_n_ds import _add_n_ds_tbe
from .accumulate_n_v2 import _accumulate_n_v2_tbe
from .apply_ftrl import _apply_ftrl_tbe
from .apply_momentum import _apply_momentum_tbe
@@ -204,6 +205,7 @@ from .fused_mul_add import _fused_mul_add_tbe
from .fused_mul_add_n import _fused_mul_add_n_tbe
from .fused_mul_apply_momentum import _fused_mul_apply_momentum_tbe
from .fill import _fill_op_tbe
from .fill_ds import _fill_ds_op_tbe
from .erf import _erf_op_tbe
from .erfc import _erfc_op_tbe
from .depthwise_conv2d import _depthwise_conv2d_tbe


+ 40
- 0
mindspore/ops/_op_impl/tbe/add_n_ds.py View File

@@ -0,0 +1,40 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""AddN op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

# Operator information for the dynamic-shape ("ds") variant of the AddN
# TBE kernel: a variadic input "x" with a required attribute "n" giving
# the number of summands.
add_n_ds_op_info = (TBERegOp("AddN")
                    .fusion_type("ELEMWISE")
                    .async_flag(False)
                    .binfile_name("add_n.so")
                    .compute_cost(10)
                    .kernel_name("add_n")
                    .partial_flag(True)
                    .dynamic_shape(True)
                    .attr("n", "required", "int", "all")
                    .input(0, "x", False, "dynamic", "all")
                    .output(0, "y", False, "required", "all")
                    .op_pattern("broadcast")
                    .dtype_format(DataType.F16_None, DataType.F16_None)
                    .dtype_format(DataType.F32_None, DataType.F32_None)
                    .dtype_format(DataType.I32_None, DataType.I32_None)
                    .get_op_info())


@op_info_register(add_n_ds_op_info)
def _add_n_ds_tbe():
    """Register the dynamic-shape AddN TBE operator information."""
    return

+ 42
- 0
mindspore/ops/_op_impl/tbe/fill_ds.py View File

@@ -0,0 +1,42 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""Fill op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType

# Operator information for the dynamic-shape ("ds") variant of the Fill
# TBE kernel: "dims" gives the target shape, "value" the scalar to fill with.
# NOTE(review): the original (I32 dims, F16 value) row declared an I16 output,
# inconsistent with every other row where the output dtype follows the value
# dtype (cf. the I64/F16 -> F16 row) — corrected to F16 below.
fill_ds_op_info = TBERegOp("Fill") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("fill.so") \
    .compute_cost(10) \
    .kernel_name("fill") \
    .partial_flag(True) \
    .dynamic_shape(True) \
    .input(0, "dims", False, "required", "all") \
    .input(1, "value", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.I32_Default, DataType.F16_Default, DataType.F16_Default) \
    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I32_Default) \
    .dtype_format(DataType.I64_Default, DataType.F32_Default, DataType.F32_Default) \
    .dtype_format(DataType.I64_Default, DataType.F16_Default, DataType.F16_Default) \
    .get_op_info()


@op_info_register(fill_ds_op_info)
def _fill_ds_op_tbe():
    """Register the dynamic-shape Fill TBE operator information."""
    return

Loading…
Cancel
Save