diff --git a/mindspore/core/abstract/infer_functions.h b/mindspore/core/abstract/infer_functions.h
index 1f684ef556..a4d78d35cd 100644
--- a/mindspore/core/abstract/infer_functions.h
+++ b/mindspore/core/abstract/infer_functions.h
@@ -270,6 +270,8 @@ AbstractBasePtr InferImplSplit(const AnalysisEnginePtr &, const PrimitivePtr &pr
                                const AbstractBasePtrList &args_spec_list);
 AbstractBasePtr InferImplSequenceMask(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                       const AbstractBasePtrList &args_spec_list);
+AbstractBasePtr InferImplAddN(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                              const AbstractBasePtrList &args_spec_list);
 template <typename T>
 AbstractBasePtr InferTupleOrListOrDictLen(const std::string &op_name, const AbstractBasePtrList &args_spec_list) {
   // Inputs: a tuple or list or dict.
diff --git a/mindspore/core/abstract/prim_maths.cc b/mindspore/core/abstract/prim_maths.cc
index 3bd5e1b686..80919abea3 100644
--- a/mindspore/core/abstract/prim_maths.cc
+++ b/mindspore/core/abstract/prim_maths.cc
@@ -229,5 +229,14 @@ AbstractBasePtr InferImplLinSpace(const AnalysisEnginePtr &, const PrimitivePtr
     std::make_shared<AbstractTensor>(start->element(), std::make_shared<Shape>(shape, min_shape, max_shape));
   return ret;
 }
+AbstractBasePtr InferImplAddN(const AnalysisEnginePtr &, const PrimitivePtr &primitive,
+                              const AbstractBasePtrList &args_spec_list) {
+  const std::string op_name = primitive->name();
+  if (args_spec_list.size() < 1) {
+    MS_LOG(EXCEPTION) << "AddN operation must have at least one input.";
+  }
+  auto input = CheckArg<AbstractTensor>(op_name, args_spec_list, 0);
+  return input->Broaden();
+}
 }  // namespace abstract
 }  // namespace mindspore
diff --git a/mindspore/core/abstract/primitive_infer_map.cc b/mindspore/core/abstract/primitive_infer_map.cc
index 17aa4bbc6e..2f2d08b110 100644
--- a/mindspore/core/abstract/primitive_infer_map.cc
+++ b/mindspore/core/abstract/primitive_infer_map.cc
@@ -47,6 +47,7 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() {
     {prim::kPrimMinimum, {InferImplMinimum, true}},
     {prim::kPrimDivNoNan, {InferImplDivNoNan, true}},
     {prim::kPrimLinSpace, {InferImplLinSpace, true}},
+    {prim::kPrimAddN, {InferImplAddN, true}},
     // Array
     {prim::kPrimScalarToArray, {InferImplScalarToArray, true}},
     {prim::kPrimArrayToScalar, {InferImplArrayToScalar, true}},
diff --git a/mindspore/core/base/core_ops.h b/mindspore/core/base/core_ops.h
index a743e18ede..6e65b650cd 100644
--- a/mindspore/core/base/core_ops.h
+++ b/mindspore/core/base/core_ops.h
@@ -200,7 +200,7 @@ inline const PrimitivePtr kPrimBroadcast = std::make_shared<Primitive>("Broadcas
 inline const PrimitivePtr kPrimAllGather = std::make_shared<Primitive>("AllGather");
 inline const PrimitivePtr kPrimReduceScatter = std::make_shared<Primitive>("ReduceScatter");
 inline const PrimitivePtr kPrimMemCpyAsync = std::make_shared<Primitive>("memcpy_async");
-
+inline const PrimitivePtr kPrimFill = std::make_shared<Primitive>("Fill");
 // RowTensor
 inline const PrimitivePtr kPrimMakeRowTensor = std::make_shared<Primitive>("MakeRowTensor");
 inline const PrimitivePtr kPrimRowTensorGetValues = std::make_shared<Primitive>("RowTensorGetValues");
diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py
index 8fa3b4ed9f..aa9ecc6269 100644
--- a/mindspore/ops/_op_impl/tbe/__init__.py
+++ b/mindspore/ops/_op_impl/tbe/__init__.py
@@ -25,6 +25,7 @@ from .acosh_grad import _acosh_grad_tbe
 from .adam_apply_one_with_decay import _adam_apply_one_with_decay_tbe
 from .apply_centered_rms_prop import _apply_centered_rms_prop_tbe
 from .add_n import _add_n_tbe
+from .add_n_ds import _add_n_ds_tbe
 from .accumulate_n_v2 import _accumulate_n_v2_tbe
 from .apply_ftrl import _apply_ftrl_tbe
 from .apply_momentum import _apply_momentum_tbe
@@ -204,6 +205,7 @@ from .fused_mul_add import _fused_mul_add_tbe
 from .fused_mul_add_n import _fused_mul_add_n_tbe
 from .fused_mul_apply_momentum import _fused_mul_apply_momentum_tbe
 from .fill import _fill_op_tbe
+from .fill_ds import _fill_ds_op_tbe
 from .erf import _erf_op_tbe
 from .erfc import _erfc_op_tbe
 from .depthwise_conv2d import _depthwise_conv2d_tbe
diff --git a/mindspore/ops/_op_impl/tbe/add_n_ds.py b/mindspore/ops/_op_impl/tbe/add_n_ds.py
new file mode 100644
index 0000000000..447ce885ce
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/add_n_ds.py
@@ -0,0 +1,40 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""AddN op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+add_n_ds_op_info = TBERegOp("AddN") \
+    .fusion_type("ELEMWISE") \
+    .async_flag(False) \
+    .binfile_name("add_n.so") \
+    .compute_cost(10) \
+    .kernel_name("add_n") \
+    .partial_flag(True) \
+    .dynamic_shape(True) \
+    .attr("n", "required", "int", "all") \
+    .input(0, "x", False, "dynamic", "all") \
+    .output(0, "y", False, "required", "all") \
+    .op_pattern("broadcast") \
+    .dtype_format(DataType.F16_None, DataType.F16_None) \
+    .dtype_format(DataType.F32_None, DataType.F32_None) \
+    .dtype_format(DataType.I32_None, DataType.I32_None) \
+    .get_op_info()
+
+
+@op_info_register(add_n_ds_op_info)
+def _add_n_ds_tbe():
+    """AddN TBE register"""
+    return
diff --git a/mindspore/ops/_op_impl/tbe/fill_ds.py b/mindspore/ops/_op_impl/tbe/fill_ds.py
new file mode 100644
index 0000000000..c0a560ef87
--- /dev/null
+++ b/mindspore/ops/_op_impl/tbe/fill_ds.py
@@ -0,0 +1,42 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Fill op"""
+from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+
+fill_ds_op_info = TBERegOp("Fill") \
+    .fusion_type("ELEMWISE") \
+    .async_flag(False) \
+    .binfile_name("fill.so") \
+    .compute_cost(10) \
+    .kernel_name("fill") \
+    .partial_flag(True) \
+    .dynamic_shape(True) \
+    .input(0, "dims", False, "required", "all") \
+    .input(1, "value", False, "required", "all") \
+    .output(0, "y", False, "required", "all") \
+    .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.I32_Default, DataType.F16_Default, DataType.F16_Default) \
+    .dtype_format(DataType.I64_Default, DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.I64_Default, DataType.F32_Default, DataType.F32_Default) \
+    .dtype_format(DataType.I64_Default, DataType.F16_Default, DataType.F16_Default) \
+    .get_op_info()
+
+
+@op_info_register(fill_ds_op_info)
+def _fill_ds_op_tbe():
+    """Fill TBE register"""
+    return
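
A minimal usage sketch (not part of the diff, assuming an Ascend build in which the add_n_ds TBE kernel above is registered): once kPrimAddN is mapped to InferImplAddN, the frontend derives AddN's output abstract by broadening the first input, which is what enables the dynamic-shape path. P.AddN and context.set_context are existing MindSpore APIs; the network, shapes, and device target below are illustrative placeholders.

    # Illustrative sketch only -- assumes the dynamic-shape AddN kernel above
    # is available; shapes and device target are placeholders.
    import numpy as np
    import mindspore.nn as nn
    import mindspore.context as context
    from mindspore import Tensor
    from mindspore.ops import operations as P

    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

    class AddNNet(nn.Cell):
        """Sums a tuple of tensors with AddN; InferImplAddN broadens the
        first input's abstract value, so the output follows input 0."""
        def __init__(self):
            super(AddNNet, self).__init__()
            self.add_n = P.AddN()

        def construct(self, x, y, z):
            return self.add_n((x, y, z))

    x = Tensor(np.ones((2, 3), np.float32))
    net = AddNNet()
    print(net(x, x, x))  # 2x3 tensor filled with 3.0

The fill_ds registration follows the same pattern for the Fill primitive, which the diff also exposes to C++ passes as kPrimFill in core_ops.h.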