| @@ -523,6 +523,8 @@ inline const PrimitivePtr kPrimTensorListStack = std::make_shared<Primitive>("Te | |||
inline const PrimitivePtr kPrimTensorListSetItem = std::make_shared<Primitive>("TensorListSetItem");
// Maths
// Modified Bessel functions of the first kind, orders 0 and 1 (see ops/bessel_i0.h, ops/bessel_i1.h).
inline const PrimitivePtr kPrimBesselI0 = std::make_shared<Primitive>("BesselI0");
inline const PrimitivePtr kPrimBesselI1 = std::make_shared<Primitive>("BesselI1");
inline const PrimitivePtr kPrimGer = std::make_shared<Primitive>("Ger");
inline const PrimitivePtr kPrimCeil = std::make_shared<Primitive>("Ceil");
inline const PrimitivePtr kPrimTensorAdd = std::make_shared<Primitive>("TensorAdd");
| @@ -0,0 +1,56 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "ops/bessel_i0.h" | |||
| #include <algorithm> | |||
| #include <set> | |||
| #include "ops/op_utils.h" | |||
| #include "utils/check_convert_utils.h" | |||
| #include "abstract/primitive_infer_map.h" | |||
| namespace mindspore { | |||
| namespace ops { | |||
| namespace { | |||
| abstract::ShapePtr BesselI0InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) { | |||
| auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape]; | |||
| return std::make_shared<abstract::Shape>(in_shape); | |||
| } | |||
| TypePtr BesselI0InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) { | |||
| auto x_type = input_args[kInputIndex0]->BuildType(); | |||
| const std::set<TypePtr> valid_types = {kFloat16, kFloat32}; | |||
| (void)CheckAndConvertUtils::CheckTensorTypeValid("x", x_type, valid_types, prim->name()); | |||
| return x_type; | |||
| } | |||
| } // namespace | |||
| AbstractBasePtr BesselI0Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, | |||
| const std::vector<AbstractBasePtr> &input_args) { | |||
| MS_EXCEPTION_IF_NULL(primitive); | |||
| const int64_t kInputNum = 1; | |||
| (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, kInputNum, | |||
| primitive->name()); | |||
| for (const auto &item : input_args) { | |||
| MS_EXCEPTION_IF_NULL(item); | |||
| } | |||
| auto infer_type = BesselI0InferType(primitive, input_args); | |||
| auto infer_shape = BesselI0InferShape(primitive, input_args); | |||
| return abstract::MakeAbstract(infer_shape, infer_type); | |||
| } | |||
| REGISTER_PRIMITIVE_EVAL_IMPL(BesselI0, prim::kPrimBesselI0, BesselI0Infer, nullptr, true); | |||
| } // namespace ops | |||
| } // namespace mindspore | |||
| @@ -0,0 +1,42 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_CORE_OPS_BESSEL_I0_H_ | |||
| #define MINDSPORE_CORE_OPS_BESSEL_I0_H_ | |||
| #include <map> | |||
| #include <memory> | |||
| #include <string> | |||
| #include <vector> | |||
| #include "ops/primitive_c.h" | |||
| #include "abstract/abstract_value.h" | |||
| #include "utils/check_convert_utils.h" | |||
| namespace mindspore { | |||
| namespace ops { | |||
| constexpr auto kNameBesselI0 = "BesselI0"; | |||
// BesselI0 primitive: element-wise modified Bessel function of the first kind,
// order 0. One input "x", one output "y"; inference is implemented in bessel_i0.cc.
class BesselI0 : public PrimitiveC {
 public:
  BesselI0() : PrimitiveC(kNameBesselI0) { InitIOName({"x"}, {"y"}); }
  ~BesselI0() = default;
  MS_DECLARE_PARENT(BesselI0, PrimitiveC);
};
| AbstractBasePtr BesselI0Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, | |||
| const std::vector<AbstractBasePtr> &input_args); | |||
| } // namespace ops | |||
| } // namespace mindspore | |||
#endif  // MINDSPORE_CORE_OPS_BESSEL_I0_H_
| @@ -0,0 +1,56 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #include "ops/bessel_i1.h" | |||
| #include <algorithm> | |||
| #include <set> | |||
| #include "ops/op_utils.h" | |||
| #include "utils/check_convert_utils.h" | |||
| #include "abstract/primitive_infer_map.h" | |||
| namespace mindspore { | |||
| namespace ops { | |||
| namespace { | |||
| abstract::ShapePtr BesselI1InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) { | |||
| auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape]; | |||
| return std::make_shared<abstract::Shape>(in_shape); | |||
| } | |||
| TypePtr BesselI1InferType(const PrimitivePtr &prim, const std::vector<AbstractBasePtr> &input_args) { | |||
| auto x_type = input_args[kInputIndex0]->BuildType(); | |||
| const std::set<TypePtr> valid_types = {kFloat16, kFloat32}; | |||
| (void)CheckAndConvertUtils::CheckTensorTypeValid("x", x_type, valid_types, prim->name()); | |||
| return x_type; | |||
| } | |||
| } // namespace | |||
| AbstractBasePtr BesselI1Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, | |||
| const std::vector<AbstractBasePtr> &input_args) { | |||
| MS_EXCEPTION_IF_NULL(primitive); | |||
| const int64_t kInputNum = 1; | |||
| (void)CheckAndConvertUtils::CheckInteger("input number", SizeToLong(input_args.size()), kEqual, kInputNum, | |||
| primitive->name()); | |||
| for (const auto &item : input_args) { | |||
| MS_EXCEPTION_IF_NULL(item); | |||
| } | |||
| auto infer_type = BesselI1InferType(primitive, input_args); | |||
| auto infer_shape = BesselI1InferShape(primitive, input_args); | |||
| return abstract::MakeAbstract(infer_shape, infer_type); | |||
| } | |||
| REGISTER_PRIMITIVE_EVAL_IMPL(BesselI1, prim::kPrimBesselI1, BesselI1Infer, nullptr, true); | |||
| } // namespace ops | |||
| } // namespace mindspore | |||
| @@ -0,0 +1,42 @@ | |||
| /** | |||
| * Copyright 2021 Huawei Technologies Co., Ltd | |||
| * | |||
| * Licensed under the Apache License, Version 2.0 (the "License"); | |||
| * you may not use this file except in compliance with the License. | |||
| * You may obtain a copy of the License at | |||
| * | |||
| * http://www.apache.org/licenses/LICENSE-2.0 | |||
| * | |||
| * Unless required by applicable law or agreed to in writing, software | |||
| * distributed under the License is distributed on an "AS IS" BASIS, | |||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| * See the License for the specific language governing permissions and | |||
| * limitations under the License. | |||
| */ | |||
| #ifndef MINDSPORE_CORE_OPS_BESSEL_I1_H_ | |||
| #define MINDSPORE_CORE_OPS_BESSEL_I1_H_ | |||
| #include <map> | |||
| #include <memory> | |||
| #include <string> | |||
| #include <vector> | |||
| #include "ops/primitive_c.h" | |||
| #include "abstract/abstract_value.h" | |||
| #include "utils/check_convert_utils.h" | |||
| namespace mindspore { | |||
| namespace ops { | |||
| constexpr auto kNameBesselI1 = "BesselI1"; | |||
// BesselI1 primitive: element-wise modified Bessel function of the first kind,
// order 1. One input "x", one output "y"; inference is implemented in bessel_i1.cc.
class BesselI1 : public PrimitiveC {
 public:
  BesselI1() : PrimitiveC(kNameBesselI1) { InitIOName({"x"}, {"y"}); }
  ~BesselI1() = default;
  MS_DECLARE_PARENT(BesselI1, PrimitiveC);
};
| AbstractBasePtr BesselI1Infer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive, | |||
| const std::vector<AbstractBasePtr> &input_args); | |||
| } // namespace ops | |||
| } // namespace mindspore | |||
#endif  // MINDSPORE_CORE_OPS_BESSEL_I1_H_
| @@ -16,6 +16,7 @@ | |||
| """Define the grad rules of math related operations.""" | |||
| from mindspore.common import dtype as mstype | |||
| import mindspore.numpy as mnp | |||
| import numpy as np | |||
| from .. import functional as F | |||
| from .. import operations as P | |||
| @@ -165,6 +166,33 @@ def get_bprop_erfinv(self): | |||
| return bprop | |||
@bprop_getters.register(P.BesselI0)
def get_bprop_bessel_i0(self):
    """Generate bprop for BesselI0"""
    bessel_i1 = P.BesselI1()

    def bprop(x, out, dout):
        # d/dx I0(x) = I1(x), so the gradient is simply dout scaled by I1(x).
        dx = bessel_i1(x) * dout
        return (dx,)

    return bprop
@bprop_getters.register(P.BesselI1)
def get_bprop_bessel_i1(self):
    """Generate bprop for BesselI1"""
    equal = P.Equal()
    div = P.Div()
    cast = P.Cast()
    dtype = P.DType()
    bessel_i0 = P.BesselI0()

    def bprop(x, out, dout):
        # d/dx I1(x) = I0(x) - I1(x)/x (DLMF 10.29.2 with nu = 1).
        # The quotient is indeterminate at x == 0; since I1(x)/x -> 1/2 there,
        # the derivative's limit is I0(0) - 1/2 = 0.5 (the previous value 1.0
        # was incorrect).
        dout_dx = mnp.where(equal(x, 0.), cast(0.5, dtype(x)), bessel_i0(x) - div(out, x))
        dx = dout * dout_dx
        return (dx,)

    return bprop
| @bprop_getters.register(P.Trunc) | |||
| def get_bprop_trunc(self): | |||
| """Grad definition for `Trunc` operation.""" | |||
| @@ -19,6 +19,8 @@ from .batchnorm_fold2 import _batchnorm_fold2_tbe | |||
| from .batchnorm_fold2_grad import _batchnorm_fold2_grad_tbe | |||
| from .batchnorm_fold2_grad_reduce import _batchnorm_fold2_grad_reduce_tbe | |||
| from .batchnorm_fold_grad import _batchnorm_fold_grad_tbe | |||
| from .bessel_i0 import _bessel_i0_tbe | |||
| from .bessel_i1 import _bessel_i1_tbe | |||
| from .correction_mul import _correction_mul_tbe | |||
| from .correction_mul_grad import _correction_mul_grad_tbe | |||
| from .fake_learned_scale_quant_perlayer import _fake_learned_scale_quant_perlayer_tbe | |||
| @@ -0,0 +1,112 @@ | |||
| # Copyright 2021 Huawei Technologies Co., Ltd | |||
| # | |||
| # Licensed under the Apache License, Version 2.0 (the "License"); | |||
| # you may not use this file except in compliance with the License. | |||
| # You may obtain a copy of the License at | |||
| # | |||
| # http://www.apache.org/licenses/LICENSE-2.0 | |||
| # | |||
| # Unless required by applicable law or agreed to in writing, software | |||
| # distributed under the License is distributed on an "AS IS" BASIS, | |||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| # See the License for the specific language governing permissions and | |||
| # limitations under the License. | |||
| # ============================================================================ | |||
| """BesselI0 op""" | |||
| from tbe import dsl | |||
| from te import tvm | |||
| from te.platform.fusion_manager import fusion_manager | |||
| from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType | |||
# TBE registration metadata for the Ascend BesselI0 kernel: a format-agnostic
# element-wise op with one required input "x" and one output "y", supporting
# float16 and float32.
bessel_i0_op_info = TBERegOp("BesselI0") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("bessel_i0.so") \
    .compute_cost(10) \
    .kernel_name("bessel_i0") \
    .partial_flag(True) \
    .op_pattern("formatAgnostic") \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_None, DataType.F16_None) \
    .dtype_format(DataType.F32_None, DataType.F32_None) \
    .get_op_info()
@op_info_register(bessel_i0_op_info)
def _bessel_i0_tbe():
    """BesselI0 TBE register"""
    # Registration is performed by the decorator; the body is intentionally a no-op.
    return
# Chebyshev series coefficients consumed by chebevl() below. A is used on the
# small-argument branch (|x| <= 8) and B on the large-argument branch (|x| > 8)
# of bessel_i0_compute; presumably taken from the Cephes math library's
# i0.c tables -- TODO confirm.
A = [-1.30002500998624804212E-8, 6.04699502254191894932E-8,
     -2.67079385394061173391E-7, 1.11738753912010371815E-6,
     -4.41673835845875056359E-6, 1.64484480707288970893E-5,
     -5.75419501008210370398E-5, 1.88502885095841655729E-4,
     -5.76375574538582365885E-4, 1.63947561694133579842E-3,
     -4.32430999505057594430E-3, 1.05464603945949983183E-2,
     -2.37374148058994688156E-2, 4.93052842396707084878E-2,
     -9.49010970480476444210E-2, 1.71620901522208775349E-1,
     -3.04682672343198398683E-1, 6.76795274409476084995E-1]

B = [3.39623202570838634515E-9, 2.26666899049817806459E-8,
     2.04891858946906374183E-7, 2.89137052083475648297E-6,
     6.88975834691682398426E-5, 3.36911647825569408990E-3,
     8.04490411014108831608E-1]
def chebevl(x, num, coef, shape, dtype):
    """Evaluate a Chebyshev series at tensor `x` via the Clenshaw recurrence.

    `coef` supplies the first `num` scalar coefficients, each broadcast to
    `shape`/`dtype` before use. Requires num >= 2 (with num < 2 the final
    subtraction would receive an uninitialized term).
    """
    b0 = dsl.broadcast(coef[0], shape, dtype)
    b1 = dsl.broadcast(0, shape, dtype)
    b2 = None
    for idx in range(1, num):
        # Clenshaw step: b0 <- x * b1 + coef[idx] - b2, shifting b0/b1 down.
        b2 = b1
        b1 = b0
        coef_bc = dsl.broadcast(coef[idx], shape, dtype)
        b0 = dsl.vsub(dsl.vadd(dsl.vmul(x, b1), coef_bc), b2)
    return dsl.vmuls(dsl.vsub(b0, b2), 0.5)
@fusion_manager.register("bessel_i0")
def bessel_i0_compute(input_x, output, kernel_name="bessel_i0"):
    """Build the TVM compute graph for element-wise I0(x).

    Uses two Chebyshev expansions split at |x| == 8 (the same scheme as the
    Cephes i0 routine -- TODO confirm the coefficient source).
    """
    dtype = input_x.dtype
    shape = input_x.shape
    # float16 input is promoted to float32 for the series evaluation and cast
    # back at the end.
    has_improve_precision = False
    if dtype != "float32":
        input_x = dsl.cast_to(input_x, "float32")
        dtype = "float32"
        has_improve_precision = True
    # I0 is an even function, so evaluate on y = |x|.
    y = dsl.vabs(input_x)
    # Small-argument branch (y <= 8): series in (y/2 - 2).
    y_le_eight_in = dsl.vmuls(y, 0.5)
    y_le_eight_in = dsl.vadds(y_le_eight_in, -2.0)
    y_le_eight = chebevl(y_le_eight_in, 18, A, shape, dtype)
    # Large-argument branch (y > 8): series in (32/y - 2), scaled by 1/sqrt(y).
    y_gt_eight_in = dsl.vadds(dsl.vmuls(dsl.vrec(y), 32.0), -2.0)
    y_gt_eight = chebevl(y_gt_eight_in, 7, B, shape, dtype)
    y_gt_eight = dsl.vmul(y_gt_eight, dsl.vrsqrt(y))
    res = dsl.vcmpsel(y, 8.0, 'le', y_le_eight, y_gt_eight)
    # Both branches compute exp(-y) * I0(y); multiply by exp(y) to undo the scaling.
    res = dsl.vmul(res, dsl.vexp(y))
    if has_improve_precision:
        res = dsl.cast_to(res, "float16")
    return res
def bessel_i0(x, output, kernel_name="bessel_i0"):
    """Compile entry for the BesselI0 TBE operator.

    `x` and `output` are TBE tensor-descriptor dicts; only x's "shape" and
    "dtype" are read to create the placeholder the compute graph is traced from.
    """
    data_x = tvm.placeholder(x.get("shape"), dtype=x.get("dtype"), name="data_x")
    res = bessel_i0_compute(data_x, output, kernel_name)
    # auto schedule
    with tvm.target.cce():
        schedule = dsl.auto_schedule(res)
    # operator build
    config = {"name": kernel_name,
              "tensor_list": [data_x, res]}
    dsl.build(schedule, config)
| @@ -0,0 +1,123 @@ | |||
| # Copyright 2021 Huawei Technologies Co., Ltd | |||
| # | |||
| # Licensed under the Apache License, Version 2.0 (the "License"); | |||
| # you may not use this file except in compliance with the License. | |||
| # You may obtain a copy of the License at | |||
| # | |||
| # http://www.apache.org/licenses/LICENSE-2.0 | |||
| # | |||
| # Unless required by applicable law or agreed to in writing, software | |||
| # distributed under the License is distributed on an "AS IS" BASIS, | |||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| # See the License for the specific language governing permissions and | |||
| # limitations under the License. | |||
| # ============================================================================ | |||
| """BesselI1 op""" | |||
| from tbe import dsl | |||
| from te import tvm | |||
| from te.platform.fusion_manager import fusion_manager | |||
| from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType | |||
# TBE registration metadata for the Ascend BesselI1 kernel: a format-agnostic
# element-wise op with one required input "x" and one output "y", supporting
# float16 and float32.
bessel_i1_op_info = TBERegOp("BesselI1") \
    .fusion_type("ELEMWISE") \
    .async_flag(False) \
    .binfile_name("bessel_i1.so") \
    .compute_cost(10) \
    .kernel_name("bessel_i1") \
    .partial_flag(True) \
    .op_pattern("formatAgnostic") \
    .input(0, "x", False, "required", "all") \
    .output(0, "y", False, "required", "all") \
    .dtype_format(DataType.F16_None, DataType.F16_None) \
    .dtype_format(DataType.F32_None, DataType.F32_None) \
    .get_op_info()
@op_info_register(bessel_i1_op_info)
def _bessel_i1_tbe():
    """BesselI1 TBE register"""
    # Registration is performed by the decorator; the body is intentionally a no-op.
    return
# Chebyshev series coefficients consumed by chebevl() below. A is used on the
# small-argument branch (|x| <= 8) and B on the large-argument branch (|x| > 8)
# of bessel_i1_compute; presumably taken from the Cephes math library's
# i1.c tables -- TODO confirm.
A = [2.77791411276104639959E-18, -2.11142121435816608115E-17,
     1.55363195773620046921E-16, -1.10559694773538630805E-15,
     7.60068429473540693410E-15, -5.04218550472791168711E-14,
     3.22379336594557470981E-13, -1.98397439776494371520E-12,
     1.17361862988909016308E-11, -6.66348972350202774223E-11,
     3.62559028155211703701E-10, -1.88724975172282928790E-9,
     9.38153738649577178388E-9, -4.44505912879632808065E-8,
     2.00329475355213526229E-7, -8.56872026469545474066E-7,
     3.47025130813767847674E-6, -1.32731636560394358279E-5,
     4.78156510755005422638E-5, -1.61760815825896745588E-4,
     5.12285956168575772895E-4, -1.51357245063125314899E-3,
     4.15642294431288815669E-3, -1.05640848946261981558E-2,
     2.47264490306265168283E-2, -5.29459812080949914269E-2,
     1.02643658689847095384E-1, -1.76416518357834055153E-1,
     2.52587186443633654823E-1]

B = [
    7.51729631084210481353E-18, 4.41434832307170791151E-18,
    -4.65030536848935832153E-17, -3.20952592199342395980E-17,
    2.96262899764595013876E-16, 3.30820231092092828324E-16,
    -1.88035477551078244854E-15, -3.81440307243700780478E-15,
    1.04202769841288027642E-14, 4.27244001671195135429E-14,
    -2.10154184277266431302E-14, -4.08355111109219731823E-13,
    -7.19855177624590851209E-13, 2.03562854414708950722E-12,
    1.41258074366137813316E-11, 3.25260358301548823856E-11,
    -1.89749581235054123450E-11, -5.58974346219658380687E-10,
    -3.83538038596423702205E-9, -2.63146884688951950684E-8,
    -2.51223623787020892529E-7, -3.88256480887769039346E-6,
    -1.10588938762623716291E-4, -9.76109749136146840777E-3,
    7.78576235018280120474E-1]
def chebevl(x, num, coef, shape, dtype):
    """Evaluate a Chebyshev series at tensor `x` via the Clenshaw recurrence.

    `coef` supplies the first `num` scalar coefficients, each broadcast to
    `shape`/`dtype` before use. Requires num >= 2 (with num < 2 the final
    subtraction would receive an uninitialized term).
    """
    b0 = dsl.broadcast(coef[0], shape, dtype)
    b1 = dsl.broadcast(0, shape, dtype)
    b2 = None
    for idx in range(1, num):
        # Clenshaw step: b0 <- x * b1 + coef[idx] - b2, shifting b0/b1 down.
        b2 = b1
        b1 = b0
        coef_bc = dsl.broadcast(coef[idx], shape, dtype)
        b0 = dsl.vsub(dsl.vadd(dsl.vmul(x, b1), coef_bc), b2)
    return dsl.vmuls(dsl.vsub(b0, b2), 0.5)
@fusion_manager.register("bessel_i1")
def bessel_i1_compute(input_x, output_y, kernel_name="bessel_i1"):
    """Build the TVM compute graph for element-wise I1(x).

    Uses two Chebyshev expansions split at |x| == 8 (the Cephes i1 scheme):
    for y = |x| <= 8, I1(y) = exp(y) * y * chebevl(y/2 - 2, A); for y > 8,
    I1(y) = exp(y) * chebevl(32/y - 2, B) / sqrt(y). I1 is odd, so the sign
    of the input is restored at the end.
    """
    dtype = input_x.dtype
    shape = input_x.shape
    # float16 input is promoted to float32 for the series evaluation and cast
    # back at the end.
    has_improve_precision = False
    if dtype != "float32":
        input_x = dsl.cast_to(input_x, "float32")
        dtype = "float32"
        has_improve_precision = True
    y = dsl.vabs(input_x)
    y_le_eight = dsl.vmul(y, chebevl(dsl.vadds(dsl.vmuls(y, 0.5), -2), 29, A, shape, dtype))
    y_gt_eight = chebevl(dsl.vadds(dsl.vmuls(dsl.vrec(y), 32.0), -2.0), 25, B, shape, dtype)
    # Bug fix: the asymptotic branch requires the 1/sqrt(y) factor (as in
    # Cephes i1 and in bessel_i0_compute); it was previously missing, which
    # overstated the result for |x| > 8.
    y_gt_eight = dsl.vmul(y_gt_eight, dsl.vrsqrt(y))
    y = dsl.vcmpsel(y, 8.0, 'le', y_le_eight, y_gt_eight)
    # I1 is odd: negate the result for negative inputs.
    res = dsl.vcmpsel(input_x, 0, 'lt', dsl.vmuls(y, -1.0), y)
    # Both branches compute exp(-|x|) * I1; multiply by exp(|x|) to undo the scaling.
    res = dsl.vmul(res, dsl.vexp(dsl.vabs(input_x)))
    if has_improve_precision:
        res = dsl.cast_to(res, "float16")
    return res
def bessel_i1(x, y, kernel_name="bessel_i1"):
    """Compile entry for the BesselI1 TBE operator.

    `x` and `y` are TBE tensor-descriptor dicts; only x's "shape" and "dtype"
    are read to create the placeholder the compute graph is traced from.
    """
    data_x = tvm.placeholder(x.get("shape"), dtype=x.get("dtype"), name="data_x")
    res = bessel_i1_compute(data_x, y, kernel_name)
    # auto schedule
    with tvm.target.cce():
        schedule = dsl.auto_schedule(res)
    # operator build
    config = {"name": kernel_name,
              "tensor_list": [data_x, res]}
    dsl.build(schedule, config)
| @@ -57,9 +57,9 @@ from .math_ops import (Abs, ACos, Asin, Asinh, AddN, AccumulateNV2, AssignAdd, A | |||
| NPUAllocFloatStatus, NPUClearFloatStatus, LinSpace, | |||
| NPUGetFloatStatus, Pow, RealDiv, IsNan, IsInf, IsFinite, FloatStatus, | |||
| Reciprocal, CumSum, HistogramFixedWidth, SquaredDifference, Xdivy, Xlogy, | |||
| Sin, Sqrt, Rsqrt, BesselI0e, BesselI1e, TruncateDiv, TruncateMod, Addcdiv, Addcmul, | |||
| Square, Sub, TensorAdd, Add, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps, Tan, | |||
| MatrixInverse, IndexAdd, Erfinv, Conj, Real, Imag, Complex, Trunc, IsClose) | |||
| Sin, Sqrt, Rsqrt, BesselI0, BesselI1, BesselI0e, BesselI1e, TruncateDiv, TruncateMod, Addcdiv, | |||
| Addcmul, Square, Sub, TensorAdd, Add, Sign, Round, SquareSumAll, Atan, Atanh, Cosh, Sinh, Eps, | |||
| Tan, MatrixInverse, IndexAdd, Erfinv, Conj, Real, Imag, Complex, Trunc, IsClose) | |||
| from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, Poisson, UniformInt, UniformReal, | |||
| RandomCategorical, StandardLaplace, Multinomial, UniformCandidateSampler, | |||
| @@ -443,6 +443,8 @@ __all__ = [ | |||
| "BitwiseAnd", | |||
| "BitwiseOr", | |||
| "BitwiseXor", | |||
| "BesselI0", | |||
| "BesselI1", | |||
| "BesselI0e", | |||
| "BesselI1e", | |||
| "Atan", | |||
| @@ -5035,6 +5035,68 @@ class BitwiseXor(_BitwiseBinaryOp): | |||
| """ | |||
class BesselI0(Primitive):
    """
    Computes BesselI0 of input element-wise.

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
          Data type must be float16 or float32.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If `x` is not a Tensor of float16, float32.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> bessel_i0 = ops.BesselI0()
        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
        >>> output = bessel_i0(x)
        >>> print(output)
        [1.014452 1.179784 1.0241697 1.0020261]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize BesselI0"""
class BesselI1(Primitive):
    """
    Computes BesselI1 of input element-wise.

    Inputs:
        - **x** (Tensor) - The shape of tensor is
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions.
          Data type must be float16 or float32.

    Outputs:
        Tensor, has the same shape as `x`.

    Raises:
        TypeError: If `x` is not a Tensor of float16, float32.

    Supported Platforms:
        ``Ascend``

    Examples:
        >>> bessel_i1 = ops.BesselI1()
        >>> x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
        >>> output = bessel_i1(x)
        >>> print(output)
        [0.1208661 0.45177728 0.1568694 0.04504559]
    """

    @prim_attr_register
    def __init__(self):
        """Initialize BesselI1"""
| class BesselI0e(Primitive): | |||
| r""" | |||
| Computes BesselI0e of input element-wise. | |||
| @@ -1722,6 +1722,14 @@ test_case_math_ops = [ | |||
| 'desc_const': [1], | |||
| 'desc_inputs': [Tensor(np.array([[True, False], [True, True]]))], | |||
| 'desc_bprop': []}), | |||
| ('BesselI0', { | |||
| 'block': P.BesselI0(), | |||
| 'desc_inputs': [[2, 3]], | |||
| 'desc_bprop': [[2, 3]]}), | |||
| ('BesselI1', { | |||
| 'block': P.BesselI1(), | |||
| 'desc_inputs': [[2, 3]], | |||
| 'desc_bprop': [[2, 3]]}), | |||
| ('BesselI0e', { | |||
| 'block': P.BesselI0e(), | |||
| 'desc_inputs': [[2, 3]], | |||