diff --git a/mindspore/lite/micro/cmake/file_list.cmake b/mindspore/lite/micro/cmake/file_list.cmake index 80b6f3d413..7417c87d5a 100644 --- a/mindspore/lite/micro/cmake/file_list.cmake +++ b/mindspore/lite/micro/cmake/file_list.cmake @@ -78,6 +78,7 @@ set(CODER_OPCODERS_SRC ${MICRO_DIR}/coder/opcoders/nnacl/fp32/tile_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/transpose_fp32_coder.cc #### nnacl int8 coder + ${MICRO_DIR}/coder/opcoders/nnacl/int8/add_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/concat_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/fullconnection_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/matmul_int8_coder.cc diff --git a/mindspore/lite/micro/cmake/wrapper.cmake b/mindspore/lite/micro/cmake/wrapper.cmake index fa24daa627..5c2fd0c21f 100644 --- a/mindspore/lite/micro/cmake/wrapper.cmake +++ b/mindspore/lite/micro/cmake/wrapper.cmake @@ -4,9 +4,9 @@ set(MICRO_WRAPPER_SRC ${LITE_DIR}/src/runtime/thread_pool.c ${MICRO_DIR}/wrapper/fp32/matmul_fp32_wrapper.c ${MICRO_DIR}/wrapper/int8/matmul_int8_wrapper.c - ${MICRO_DIR}/wrapper/int8/conv_init_int8.c - ${MICRO_DIR}/wrapper/int8/conv1x1_init_int8.c - ${MICRO_DIR}/wrapper/int8/conv1x1_run_int8.c + ${MICRO_DIR}/wrapper/int8/conv_init_int8_wrapper.c + ${MICRO_DIR}/wrapper/int8/conv1x1_init_int8_wrapper.c + ${MICRO_DIR}/wrapper/int8/conv1x1_run_int8_wrapper.c ) list(APPEND FILE_SET ${MICRO_WRAPPER_SRC}) \ No newline at end of file diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc index a9dd2ea0ae..be9041267b 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 
2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h index e16a0e3db3..6946459d42 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.cc new file mode 100644 index 0000000000..afe7085610 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.cc @@ -0,0 +1,156 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "micro/coder/opcoders/nnacl/int8/add_int8_coder.h" +#include +#include +#include "nnacl/int8/quantize.h" +#include "micro/coder/log.h" +#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "micro/coder/opcoders/file_collector.h" + +using mindspore::schema::PrimitiveType_Add; + +namespace mindspore::lite::micro { + +int AddInt8Coder::Prepare(CoderContext *const context) { + input0 = input_tensors().at(0); + input1 = input_tensors().at(1); + MS_CHECK_PTR(input0); + MS_CHECK_PTR(input1); + + MS_CHECK_RET_CODE(Init(), "Init failed"); + MS_CHECK_RET_CODE(ReSize(), "ReSize failed"); + + return RET_OK; +} + +int AddInt8Coder::DoCode(CoderContext *const context) { + Collect(context, {"wrapper/int8/conv1x1_init_int8.h"}, {"add_int8_wrapper.c", "add_int8.c", "thread_pool.c"}); + + nnacl::NNaclInt8Serializer code; + + code.CodeStruct("para", para_); + code.CodeStruct("arith_para", *arith_para_); + code.CodeBaseStruct("AddArgs", "args", "para", "arith_para", in_size_, out_size_, thread_num_s_, elements_num_, + support_opt_add_, input0, input1, output_tensor_); + + if (arith_para_->broadcasting_) { + code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "AddBroadcastRun", "&args", thread_num_s_); + } else { + code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "AddRun", "&args", thread_num_s_); + } + + return RET_OK; +} + +int AddInt8Coder::Init() { + para_.in0_args_.zp_ = input0->quant_params().front().zeroPoint * -1; + para_.in1_args_.zp_ = input1->quant_params().front().zeroPoint * -1; + para_.out_zp_ = output_tensor_->quant_params().front().zeroPoint; + + const double in0_scale = input0->quant_params().front().scale; + const double in1_scale = input1->quant_params().front().scale; + const double out_scale = output_tensor_->quant_params().front().scale; + + para_.left_shift_ = 20; + const double twice_max_input_scale = 2 * std::max(in0_scale, in1_scale); + const double in0_multiplier = in0_scale / 
twice_max_input_scale; + const double in1_multiplier = in1_scale / twice_max_input_scale; + const double out_multiplier = twice_max_input_scale / ((1 << para_.left_shift_) * out_scale); + + QuantizeMultiplierSmallerThanOne(in0_multiplier, &para_.in0_args_.multiplier_, &para_.in0_args_.left_shift_); + QuantizeMultiplierSmallerThanOne(in1_multiplier, &para_.in1_args_.multiplier_, &para_.in1_args_.left_shift_); + QuantizeMultiplierSmallerThanOne(out_multiplier, &para_.out_multiplier_, &para_.out_left_shift_); + + para_.in0_args_.right_shift_ = -para_.in0_args_.left_shift_ > 0 ? 0 : para_.in0_args_.left_shift_; + para_.in1_args_.right_shift_ = -para_.in1_args_.left_shift_ > 0 ? 0 : para_.in1_args_.left_shift_; + para_.out_right_shift_ = -para_.out_left_shift_ > 0 ? 0 : para_.out_left_shift_; + + para_.in0_args_.left_shift_ = -para_.in0_args_.left_shift_ > 0 ? -para_.in0_args_.left_shift_ : 0; + para_.in1_args_.left_shift_ = -para_.in1_args_.left_shift_ > 0 ? -para_.in1_args_.left_shift_ : 0; + para_.out_left_shift_ = -para_.out_left_shift_ > 0 ?
-para_.out_left_shift_ : 0; + + auto act = arith_para_->activation_type_; + CalculateActivationRangeQuantized(act == ActType_Relu, act == ActType_Relu6, para_.out_zp_, + static_cast<float>(out_scale), &para_.min_, &para_.max_); + return RET_OK; +} + +int AddInt8Coder::ReSize() { + support_opt_add_ = (input0->ElementsNum() == 1) || (input1->ElementsNum() == 1); + if (support_opt_add_) { + arith_para_->broadcasting_ = false; + } + + elements_num_ = output_tensor_->ElementsNum(); + + arith_para_->in_elements_num0_ = input_tensors_[0]->ElementsNum(); + arith_para_->in_elements_num1_ = input_tensors_[1]->ElementsNum(); + arith_para_->out_elements_num_ = output_tensors_[0]->ElementsNum(); + + for (size_t i = 0; i < input_tensors_.at(0)->shape().size(); i++) { + if (arith_para_->in_shape0_[i] == -1) { + MS_CHECK_RET_CODE(memcpy_s(arith_para_->in_shape0_, std::extent<decltype(arith_para_->in_shape0_)>::value, + input0->shape().data(), input0->shape().size() * sizeof(int)), + "memcpy failed"); + break; + } + } + for (size_t i = 0; i < input_tensors_.at(1)->shape().size(); i++) { + if (arith_para_->in_shape1_[i] == -1) { + MS_CHECK_RET_CODE(memcpy_s(arith_para_->in_shape1_, std::extent<decltype(arith_para_->in_shape1_)>::value, + input1->shape().data(), input1->shape().size() * sizeof(int)), + "memcpy failed"); + break; + } + } + for (size_t i = 0; i < output_tensor_->shape().size(); i++) { + if (arith_para_->out_shape_[i] == -1) { + MS_CHECK_RET_CODE(memcpy_s(arith_para_->out_shape_, std::extent<decltype(arith_para_->out_shape_)>::value, + output_tensor_->shape().data(), output_tensor_->shape().size() * sizeof(int)), + "memcpy failed"); + break; + } + } + + if (arith_para_->broadcasting_) { + size_t break_pos_ = 0; + for (auto i = arith_para_->ndim_ - 1; i >= 0; --i) { + if (arith_para_->in_shape0_[i] != arith_para_->in_shape1_[i]) { + break_pos_ = i; + break; + } + } + in_size_ = 1; + out_size_ = 1; + for (size_t i = 0; i < arith_para_->ndim_; i++) { + if (i > break_pos_) { + in_size_ *= arith_para_->out_shape_[i]; + } else { + out_size_ *=
arith_para_->out_shape_[i]; + } + } + + ComputeStrides(arith_para_->in_shape0_, arith_para_->in_strides0_, arith_para_->ndim_); + ComputeStrides(arith_para_->in_shape1_, arith_para_->in_strides1_, arith_para_->ndim_); + ComputeStrides(arith_para_->out_shape_, arith_para_->out_strides_, arith_para_->ndim_); + } + return RET_OK; +} + +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Add, CPUOpCoderCreator<AddInt8Coder>) +} // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h new file mode 100644 index 0000000000..c1064a89a9 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h @@ -0,0 +1,53 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADD_INT8_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADD_INT8_CODER_H_ + +#include <vector> +#include "micro/coder/opcoders/op_coder.h" +#include "nnacl/int8/add_int8.h" + +namespace mindspore::lite::micro { +class AddInt8Coder : public OperatorCoder { + public: + AddInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) { + arith_para_ = reinterpret_cast<ArithmeticParameter *>(parameter_); + } + + ~AddInt8Coder() override = default; + + int Prepare(CoderContext *const context) override; + + int DoCode(CoderContext *const context) override; + + private: + int Init(); + int ReSize(); + + AddQuantParameter para_; + ArithmeticParameter *arith_para_{nullptr}; + Tensor *input0{nullptr}; + Tensor *input1{nullptr}; + int in_size_{0}; + int out_size_{0}; + int elements_num_{0}; + bool support_opt_add_{false}; +}; +} // namespace mindspore::lite::micro +#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADD_INT8_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc index 8966f4715e..6b9c1c6608 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
@@ -45,8 +45,8 @@ int Conv2D1x1Int8Coder::DoCode(CoderContext *const context) { Collect(context, {"nnacl/int8/conv1x1_int8.h", "nnacl/common_func.h", "wrapper/int8/conv1x1_init_int8.h", "wrapper/int8/conv1x1_run_int8.h"}, - {"common_func.c", "pack.c", "conv1x1_int8.c", "matmul_int8.c", "fixed_point.c", "conv1x1_init_int8.c", - "conv1x1_run_int8.c"}); + {"common_func.c", "pack.c", "conv1x1_int8.c", "matmul_int8.c", "fixed_point.c", "conv1x1_init_int8_wrapper.c", + "conv1x1_run_int8_wrapper.c", "thread_pool.c"}); nnacl::NNaclInt8Serializer code; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h index 04a232484b..0f1d9e144a 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc index 176e5ddbdd..30ccf2f976 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h index eb05af43bf..d32a7aa4c6 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc index 8d9a8f2220..0c60ecc62d 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -182,7 +182,7 @@ int Conv2DINT8Coder::Resize() { int Conv2DINT8Coder::DoCode(CoderContext *const context) { Collect(context, {"nnacl/int8/conv_int8.h", "nnacl/common_func.h", "nnacl/kernel/int8/conv_init_int8.h"}, {"common_func.c", "pack.c", "conv_int8.c", "winograd_transform.c", "matmul_int8.c", "fixed_point.c", - "conv_init_int8.c"}); + "conv_init_int8_wrapper.c", "thread_pool.c"}); // call the op function nnacl::NNaclInt8Serializer code; code.precision(kPrecision); diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h index 52859be068..c89496ce39 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc index ea814afb42..fc0e3093fc 100644 --- a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc +++ b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc @@ -70,6 +70,13 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const MatMulParame matmul_parameter.b_const_, matmul_parameter.act_type_); } +void NNaclInt8Serializer::CodeStruct(const std::string &name, const AddQuantParameter &add_quant_parameter) { + CodeBaseStruct("AddQuantParameter", name, add_quant_parameter.left_shift_, add_quant_parameter.min_, + add_quant_parameter.max_, add_quant_parameter.in0_args_, add_quant_parameter.in1_args_, + add_quant_parameter.out_zp_, add_quant_parameter.out_left_shift_, add_quant_parameter.out_right_shift_, + add_quant_parameter.out_multiplier_); +} + void NNaclInt8Serializer::CodeStruct(const std::string &name, const ArithmeticParameter &arithmetic_parameter) { CodeBaseStruct("ArithmeticParameter", name, arithmetic_parameter.op_parameter_, arithmetic_parameter.broadcasting_, arithmetic_parameter.ndim_, arithmetic_parameter.activation_type_, diff --git a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h index 9fe57a7eb7..3d59511609 100644 --- a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h +++ b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h @@ -20,6 +20,7 @@ #include #include "nnacl/pooling_parameter.h" #include "nnacl/softmax_parameter.h" +#include "nnacl/int8/add_int8.h" #include "nnacl/int8/quantize.h" namespace mindspore::lite::micro { @@ -35,6 +36,11 @@ inline std::ostream 
&operator<<(std::ostream &code, const OpParameter &tile) { return code; } +inline std::ostream &operator<<(std::ostream &code, const AddQuantQrgs &args) { + code << "{" << args.zp_ << ", " << args.left_shift_ << ", " << args.right_shift_ << ", " << args.multiplier_ << "}"; + return code; +} + inline std::ostream &operator<<(std::ostream &code, PoolMode pool_mode) { code << "(PoolMode)" << "(" << static_cast<int>(pool_mode) << ")"; diff --git a/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.c b/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.c new file mode 100644 index 0000000000..a179c668d5 --- /dev/null +++ b/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.c @@ -0,0 +1,69 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "wrapper/int8/add_int8_wrapper.h" +#include "nnacl/errorcode.h" + +int AddBroadcastRun(void *cdata, int task_id) { + AddArgs *args = (AddArgs *)(cdata); + int stride = UP_DIV(args->out_size_, args->thread_count_); + int real_out_count = MSMIN(stride, args->out_size_ - stride * task_id); + if (real_out_count <= 0) { + return NNACL_OK; + } + int8_t *cur_in0 = NULL; + int8_t *cur_in1 = NULL; + int8_t *cur_out = NULL; + for (int i = 0; i < real_out_count; i++) { + if (args->arith_para_->in_elements_num0_ == args->arith_para_->out_elements_num_) { + cur_in0 = args->input0_data_ + task_id * stride * args->in_size_ + i * args->in_size_; + cur_in1 = args->input1_data_; + cur_out = args->output_data_ + task_id * stride * args->in_size_ + i * args->in_size_; + } else { + cur_in0 = args->input0_data_; + cur_in1 = args->input1_data_ + task_id * stride * args->in_size_ + i * args->in_size_; + cur_out = args->output_data_ + task_id * stride * args->in_size_ + i * args->in_size_; + } + AddInt8(cur_in0, cur_in1, cur_out, args->in_size_, &args->para_); + } + return NNACL_OK; +} + +int AddRun(void *cdata, int task_id) { + AddArgs *args = (AddArgs *)(cdata); + /* no need broadcast */ + int stride = UP_DIV(args->elements_num_, args->thread_count_); + int rest_count = args->elements_num_ - task_id * stride; + int real_count = MSMIN(stride, rest_count); + if (real_count <= 0) { + return NNACL_OK; + } + int8_t *cur_in0 = args->input0_data_ + stride * task_id; + int8_t *cur_in1 = args->input1_data_ + stride * task_id; + int8_t *cur_out = args->output_data_ + stride * task_id; + if (args->support_opt_add_) { + int8_t *ptr_in = args->arith_para_->in_elements_num0_ == 1 ? cur_in1 : cur_in0; + int8_t element_in = args->arith_para_->in_elements_num0_ == 1 ? args->input0_data_[0] : args->input1_data_[0]; + AddQuantQrgs *ptr_args = + args->arith_para_->in_elements_num0_ == 1 ? 
&args->para_.in1_args_ : &args->para_.in0_args_; + AddQuantQrgs *ele_args = + args->arith_para_->in_elements_num0_ == 1 ? &args->para_.in0_args_ : &args->para_.in1_args_; + AddOptInt8(ptr_in, element_in, cur_out, rest_count, &args->para_, ptr_args, ele_args); + } else { + AddInt8(cur_in0, cur_in1, cur_out, rest_count, &args->para_); + } + return NNACL_OK; +} diff --git a/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.h b/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.h new file mode 100644 index 0000000000..c23e32e3ae --- /dev/null +++ b/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.h @@ -0,0 +1,50 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_MICRO_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ +#include <stdbool.h> +#include "nnacl/int8/matmul_int8.h" +#include "src/runtime/thread_pool.h" +#include "nnacl/int8/add_int8.h" +#include "nnacl/arithmetic.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + AddQuantParameter para_; + ArithmeticParameter *arith_para_; + int in_size_; + int out_size_; + int thread_count_; + int elements_num_; + bool support_opt_add_; + int8_t *input0_data_; + int8_t *input1_data_; + int8_t *output_data_; +} AddArgs; + +int AddBroadcastRun(void *cdata, int task_id); + +int AddRun(void *cdata, int task_id); + +#ifdef __cplusplus +} +#endif + +#endif  // MINDSPORE_LITE_MICRO_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8.c b/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.c similarity index 98% rename from mindspore/lite/micro/wrapper/int8/conv1x1_init_int8.c rename to mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.c index 315efceb2b..109532eb48 100644 --- a/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8.c +++ b/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.c @@ -14,7 +14,7 @@ * limitations under the License.
*/ -#include "wrapper/int8/conv1x1_init_int8.h" +#include "wrapper/int8/conv1x1_init_int8_wrapper.h" #include #include "nnacl/int8/matmul_int8.h" #include "nnacl/errorcode.h" diff --git a/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8.h b/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.h similarity index 100% rename from mindspore/lite/micro/wrapper/int8/conv1x1_init_int8.h rename to mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.h diff --git a/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8.c b/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.c similarity index 99% rename from mindspore/lite/micro/wrapper/int8/conv1x1_run_int8.c rename to mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.c index 82e42bf125..ddaa1ecee8 100644 --- a/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8.c +++ b/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.c @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "wrapper/int8/conv1x1_run_int8.h" +#include "wrapper/int8/conv1x1_run_int8_wrapper.h" #include "nnacl/base/conv1x1_base.h" #include "nnacl/int8/matmul_int8.h" #include "nnacl/int8/pack_int8.h" diff --git a/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8.h b/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.h similarity index 100% rename from mindspore/lite/micro/wrapper/int8/conv1x1_run_int8.h rename to mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.h diff --git a/mindspore/lite/micro/wrapper/int8/conv_init_int8.c b/mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.c similarity index 98% rename from mindspore/lite/micro/wrapper/int8/conv_init_int8.c rename to mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.c index 276e9d9902..71e8db0fee 100644 --- a/mindspore/lite/micro/wrapper/int8/conv_init_int8.c +++ b/mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.c @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "wrapper/int8/conv_init_int8.h" +#include "wrapper/int8/conv_init_int8_wrapper.h" #include #include "nnacl/op_base.h" #include "nnacl/int8/matmul_int8.h" diff --git a/mindspore/lite/micro/wrapper/int8/conv_init_int8.h b/mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.h similarity index 100% rename from mindspore/lite/micro/wrapper/int8/conv_init_int8.h rename to mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.h