Browse Source

!12454 add int8 Add operator coder

From: @zhujingxuan
Reviewed-by: @wangchengyuan,@wangchengyuan
Signed-off-by: @wangchengyuan,@wangchengyuan
tags/v1.2.0-rc1
mindspore-ci-bot Gitee 4 years ago
parent
commit
b3350132eb
22 changed files with 359 additions and 17 deletions
  1. +1
    -0
      mindspore/lite/micro/cmake/file_list.cmake
  2. +3
    -3
      mindspore/lite/micro/cmake/wrapper.cmake
  3. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc
  4. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h
  5. +156
    -0
      mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.cc
  6. +53
    -0
      mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h
  7. +3
    -3
      mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc
  8. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h
  9. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc
  10. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h
  11. +2
    -2
      mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc
  12. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h
  13. +7
    -0
      mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc
  14. +6
    -0
      mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h
  15. +69
    -0
      mindspore/lite/micro/wrapper/int8/add_int8_wrapper.c
  16. +50
    -0
      mindspore/lite/micro/wrapper/int8/add_int8_wrapper.h
  17. +1
    -1
      mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.c
  18. +0
    -0
      mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.h
  19. +1
    -1
      mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.c
  20. +0
    -0
      mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.h
  21. +1
    -1
      mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.c
  22. +0
    -0
      mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.h

+ 1
- 0
mindspore/lite/micro/cmake/file_list.cmake View File

@@ -78,6 +78,7 @@ set(CODER_OPCODERS_SRC
${MICRO_DIR}/coder/opcoders/nnacl/fp32/tile_fp32_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/fp32/transpose_fp32_coder.cc
#### nnacl int8 coder
${MICRO_DIR}/coder/opcoders/nnacl/int8/add_int8_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/int8/concat_int8_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/int8/fullconnection_int8_coder.cc
${MICRO_DIR}/coder/opcoders/nnacl/int8/matmul_int8_coder.cc


+ 3
- 3
mindspore/lite/micro/cmake/wrapper.cmake View File

@@ -4,9 +4,9 @@ set(MICRO_WRAPPER_SRC
${LITE_DIR}/src/runtime/thread_pool.c
${MICRO_DIR}/wrapper/fp32/matmul_fp32_wrapper.c
${MICRO_DIR}/wrapper/int8/matmul_int8_wrapper.c
${MICRO_DIR}/wrapper/int8/conv_init_int8.c
${MICRO_DIR}/wrapper/int8/conv1x1_init_int8.c
${MICRO_DIR}/wrapper/int8/conv1x1_run_int8.c
${MICRO_DIR}/wrapper/int8/conv_init_int8_wrapper.c
${MICRO_DIR}/wrapper/int8/conv1x1_init_int8_wrapper.c
${MICRO_DIR}/wrapper/int8/conv1x1_run_int8_wrapper.c
)

list(APPEND FILE_SET ${MICRO_WRAPPER_SRC})

+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 156
- 0
mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.cc View File

@@ -0,0 +1,156 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "micro/coder/opcoders/nnacl/int8/add_int8_coder.h"
#include <algorithm>
#include <type_traits>
#include "nnacl/int8/quantize.h"
#include "micro/coder/log.h"
#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h"
#include "micro/coder/opcoders/file_collector.h"

using mindspore::schema::PrimitiveType_Add;

namespace mindspore::lite::micro {

// Caches the two operand tensors, then runs the one-time quantization setup
// (Init) followed by the shape-dependent setup (ReSize). Returns RET_OK on
// success; the MS_CHECK_* macros return an error code early on failure.
int AddInt8Coder::Prepare(CoderContext *const context) {
input0 = input_tensors().at(0);
input1 = input_tensors().at(1);
// Both inputs are required; bail out with an error code on nullptr.
MS_CHECK_PTR(input0);
MS_CHECK_PTR(input1);

MS_CHECK_RET_CODE(Init(), "Init failed");
MS_CHECK_RET_CODE(ReSize(), "ReSize failed");

return RET_OK;
}

// Emits the generated C code for the int8 Add: the parameter structs, an
// AddArgs aggregate, and a ParallelLaunch call dispatching to either the
// broadcast or the plain element-wise runner.
int AddInt8Coder::DoCode(CoderContext *const context) {
  // Collect the wrapper header that declares AddArgs/AddRun/AddBroadcastRun
  // together with the C sources the generated code links against. The
  // original collected "wrapper/int8/conv1x1_init_int8.h" — a copy-paste
  // leftover from the conv1x1 coder that declares none of the symbols
  // referenced below.
  Collect(context, {"wrapper/int8/add_int8_wrapper.h"},
          {"add_int8_wrapper.c", "add_int8.c", "thread_pool.c"});

  nnacl::NNaclInt8Serializer code;

  // Serialize the quantization and broadcast parameters, then bundle them
  // with the tensor addresses into an AddArgs value (field order must match
  // the AddArgs struct in wrapper/int8/add_int8_wrapper.h).
  code.CodeStruct("para", para_);
  code.CodeStruct("arith_para", *arith_para_);
  code.CodeBaseStruct("AddArgs", "args", "para", "arith_para", in_size_, out_size_, thread_num_s_, elements_num_,
                      support_opt_add_, input0, input1, output_tensor_);

  // Broadcasting operands use the per-output-block runner; otherwise the
  // plain element-wise runner is launched across the thread pool.
  if (arith_para_->broadcasting_) {
    code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "AddBroadcastRun", "&args", thread_num_s_);
  } else {
    code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "AddRun", "&args", thread_num_s_);
  }

  return RET_OK;
}

// Precomputes the fixed-point quantization arguments for the int8 Add:
// negated input zero points, rescaling multipliers with split left/right
// shifts for both inputs and the output, and the clamped activation range.
int AddInt8Coder::Init() {
// Zero points are negated once here so the kernel can add them directly
// instead of subtracting at runtime.
para_.in0_args_.zp_ = input0->quant_params().front().zeroPoint * -1;
para_.in1_args_.zp_ = input1->quant_params().front().zeroPoint * -1;
para_.out_zp_ = output_tensor_->quant_params().front().zeroPoint;

const double in0_scale = input0->quant_params().front().scale;
const double in1_scale = input1->quant_params().front().scale;
const double out_scale = output_tensor_->quant_params().front().scale;

// Inputs are pre-shifted left by this fixed amount to keep precision while
// rescaling; the output multiplier compensates by the same factor below.
para_.left_shift_ = 20;
const double twice_max_input_scale = 2 * std::max(in0_scale, in1_scale);
const double in0_multiplier = in0_scale / twice_max_input_scale;
const double in1_multiplier = in1_scale / twice_max_input_scale;
const double out_multiplier = twice_max_input_scale / ((1 << para_.left_shift_) * out_scale);

// Each call stores a fixed-point multiplier and a SIGNED shift (negative
// means shift right) into the corresponding left_shift_ field.
QuantizeMultiplierSmallerThanOne(in0_multiplier, &para_.in0_args_.multiplier_, &para_.in0_args_.left_shift_);
QuantizeMultiplierSmallerThanOne(in1_multiplier, &para_.in1_args_.multiplier_, &para_.in1_args_.left_shift_);
QuantizeMultiplierSmallerThanOne(out_multiplier, &para_.out_multiplier_, &para_.out_left_shift_);

// Split each signed shift into two non-negative amounts: a non-negative
// signed value becomes the right shift (left shift set to 0) ...
para_.in0_args_.right_shift_ = -para_.in0_args_.left_shift_ > 0 ? 0 : para_.in0_args_.left_shift_;
para_.in1_args_.right_shift_ = -para_.in1_args_.left_shift_ > 0 ? 0 : para_.in1_args_.left_shift_;
para_.out_right_shift_ = -para_.out_left_shift_ > 0 ? 0 : para_.out_left_shift_;

// ... while a negative value becomes the (positive) left shift. NOTE: these
// lines overwrite the fields read above, so the statement order matters.
para_.in0_args_.left_shift_ = -para_.in0_args_.left_shift_ > 0 ? -para_.in0_args_.left_shift_ : 0;
para_.in1_args_.left_shift_ = -para_.in1_args_.left_shift_ > 0 ? -para_.in1_args_.left_shift_ : 0;
para_.out_left_shift_ = -para_.out_left_shift_ > 0 ? -para_.out_left_shift_ : 0;

// Clamp range depends on the fused activation (ReLU / ReLU6 / none).
auto act = arith_para_->activation_type_;
CalculateActivationRangeQuantized(act == ActType_Relu, act == ActType_Relu6, para_.out_zp_,
static_cast<float>(out_scale), &para_.min_, &para_.max_);
return RET_OK;
}

// Recomputes the shape-dependent state: scalar-operand fast-path detection,
// element counts, dynamic (-1) shape refresh, and the broadcast block sizes
// and strides.
int AddInt8Coder::ReSize() {
  // When either operand is a single element, the optimized scalar kernel is
  // used and generic broadcasting is unnecessary.
  support_opt_add_ = (input0->ElementsNum() == 1) || (input1->ElementsNum() == 1);
  if (support_opt_add_) {
    arith_para_->broadcasting_ = false;
  }

  elements_num_ = output_tensor_->ElementsNum();

  arith_para_->in_elements_num0_ = input_tensors_[0]->ElementsNum();
  arith_para_->in_elements_num1_ = input_tensors_[1]->ElementsNum();
  arith_para_->out_elements_num_ = output_tensors_[0]->ElementsNum();

  // Shape entries recorded as -1 are dynamic placeholders: refresh the whole
  // shape array from the tensor. BUGFIX: memcpy_s takes the destination
  // capacity in BYTES; the original passed std::extent (the element count),
  // which made the copy fail with ERANGE for any shape whose byte size
  // exceeds the array's dimension count.
  for (size_t i = 0; i < input_tensors_.at(0)->shape().size(); i++) {
    if (arith_para_->in_shape0_[i] == -1) {
      MS_CHECK_RET_CODE(
        memcpy_s(arith_para_->in_shape0_, std::extent<decltype(arith_para_->in_shape0_)>::value * sizeof(int),
                 input0->shape().data(), input0->shape().size() * sizeof(int)),
        "memcpy failed");
      break;
    }
  }
  for (size_t i = 0; i < input_tensors_.at(1)->shape().size(); i++) {
    if (arith_para_->in_shape1_[i] == -1) {
      MS_CHECK_RET_CODE(
        memcpy_s(arith_para_->in_shape1_, std::extent<decltype(arith_para_->in_shape1_)>::value * sizeof(int),
                 input1->shape().data(), input1->shape().size() * sizeof(int)),
        "memcpy failed");
      break;
    }
  }
  for (size_t i = 0; i < output_tensor_->shape().size(); i++) {
    if (arith_para_->out_shape_[i] == -1) {
      MS_CHECK_RET_CODE(
        memcpy_s(arith_para_->out_shape_, std::extent<decltype(arith_para_->out_shape_)>::value * sizeof(int),
                 output_tensor_->shape().data(), output_tensor_->shape().size() * sizeof(int)),
        "memcpy failed");
      break;
    }
  }

  if (arith_para_->broadcasting_) {
    // Find the innermost axis where the operand shapes differ: axes after it
    // form the contiguous inner block (in_size_), axes up to and including
    // it form the outer iteration count (out_size_).
    size_t break_pos_ = 0;
    // BUGFIX: use a signed loop index. If ndim_ is unsigned (it is compared
    // against size_t below), `auto i = ndim_ - 1; i >= 0` never terminates.
    for (int i = static_cast<int>(arith_para_->ndim_) - 1; i >= 0; --i) {
      if (arith_para_->in_shape0_[i] != arith_para_->in_shape1_[i]) {
        break_pos_ = static_cast<size_t>(i);
        break;
      }
    }
    in_size_ = 1;
    out_size_ = 1;
    for (size_t i = 0; i < arith_para_->ndim_; i++) {
      if (i > break_pos_) {
        in_size_ *= arith_para_->out_shape_[i];
      } else {
        out_size_ *= arith_para_->out_shape_[i];
      }
    }

    ComputeStrides(arith_para_->in_shape0_, arith_para_->in_strides0_, arith_para_->ndim_);
    ComputeStrides(arith_para_->in_shape1_, arith_para_->in_strides1_, arith_para_->ndim_);
    ComputeStrides(arith_para_->out_shape_, arith_para_->out_strides_, arith_para_->ndim_);
  }
  return RET_OK;
}

REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Add, CPUOpCoderCreator<AddInt8Coder>)
} // namespace mindspore::lite::micro

+ 53
- 0
mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h View File

@@ -0,0 +1,53 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADD_INT8_CODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADD_INT8_CODER_H_

#include <vector>
#include "micro/coder/opcoders/op_coder.h"
#include "nnacl/int8/add_int8.h"

namespace mindspore::lite::micro {
// Operator coder that generates C code for the int8 element-wise Add op.
// Prepare() computes the quantization and broadcast parameters; DoCode()
// emits an AddArgs aggregate and a ParallelLaunch of AddRun/AddBroadcastRun.
class AddInt8Coder : public OperatorCoder {
public:
AddInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)
: OperatorCoder(in_tensors, out_tensors, node, node_index, target) {
// parameter_ is the generic OpParameter owned by the base; for Add it is
// an ArithmeticParameter (non-owning view, lifetime managed by the base).
arith_para_ = reinterpret_cast<ArithmeticParameter *>(parameter_);
}

~AddInt8Coder() override = default;

int Prepare(CoderContext *const context) override;

int DoCode(CoderContext *const context) override;

private:
int Init();    // one-time quantization setup (multipliers, shifts, clamp range)
int ReSize();  // shape-dependent setup (element counts, broadcast sizes/strides)

AddQuantParameter para_;              // quantization args emitted into generated code
ArithmeticParameter *arith_para_{nullptr};  // broadcast shapes/strides; aliases parameter_
Tensor *input0{nullptr};              // first operand, cached in Prepare()
Tensor *input1{nullptr};              // second operand, cached in Prepare()
int in_size_{0};                      // contiguous inner-block length (broadcast path)
int out_size_{0};                     // number of outer blocks (broadcast path)
int elements_num_{0};                 // total output element count
bool support_opt_add_{false};         // true when either operand is a scalar
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADD_INT8_CODER_H_

+ 3
- 3
mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -45,8 +45,8 @@ int Conv2D1x1Int8Coder::DoCode(CoderContext *const context) {
Collect(context,
{"nnacl/int8/conv1x1_int8.h", "nnacl/common_func.h", "wrapper/int8/conv1x1_init_int8.h",
"wrapper/int8/conv1x1_run_int8.h"},
{"common_func.c", "pack.c", "conv1x1_int8.c", "matmul_int8.c", "fixed_point.c", "conv1x1_init_int8.c",
"conv1x1_run_int8.c"});
{"common_func.c", "pack.c", "conv1x1_int8.c", "matmul_int8.c", "fixed_point.c", "conv1x1_init_int8_wrapper.c",
"conv1x1_run_int8_wrapper.c", "thread_pool.c"});

nnacl::NNaclInt8Serializer code;



+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 2
- 2
mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -182,7 +182,7 @@ int Conv2DINT8Coder::Resize() {
int Conv2DINT8Coder::DoCode(CoderContext *const context) {
Collect(context, {"nnacl/int8/conv_int8.h", "nnacl/common_func.h", "nnacl/kernel/int8/conv_init_int8.h"},
{"common_func.c", "pack.c", "conv_int8.c", "winograd_transform.c", "matmul_int8.c", "fixed_point.c",
"conv_init_int8.c"});
"conv_init_int8_wrapper.c", "thread_pool.c"});
// call the op function
nnacl::NNaclInt8Serializer code;
code.precision(kPrecision);


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h View File

@@ -1,5 +1,5 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 7
- 0
mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc View File

@@ -70,6 +70,13 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const MatMulParame
matmul_parameter.b_const_, matmul_parameter.act_type_);
}

// Serializes an AddQuantParameter into the generated C source as a
// brace-initialized struct literal bound to `name`.
// NOTE(review): the argument order is assumed to match the field order of
// AddQuantParameter in nnacl/int8/add_int8.h — verify when that struct changes.
void NNaclInt8Serializer::CodeStruct(const std::string &name, const AddQuantParameter &add_quant_parameter) {
CodeBaseStruct("AddQuantParameter", name, add_quant_parameter.left_shift_, add_quant_parameter.min_,
add_quant_parameter.max_, add_quant_parameter.in0_args_, add_quant_parameter.in1_args_,
add_quant_parameter.out_zp_, add_quant_parameter.out_left_shift_, add_quant_parameter.out_right_shift_,
add_quant_parameter.out_multiplier_);
}

void NNaclInt8Serializer::CodeStruct(const std::string &name, const ArithmeticParameter &arithmetic_parameter) {
CodeBaseStruct("ArithmeticParameter", name, arithmetic_parameter.op_parameter_, arithmetic_parameter.broadcasting_,
arithmetic_parameter.ndim_, arithmetic_parameter.activation_type_,


+ 6
- 0
mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h View File

@@ -20,6 +20,7 @@
#include <string>
#include "nnacl/pooling_parameter.h"
#include "nnacl/softmax_parameter.h"
#include "nnacl/int8/add_int8.h"
#include "nnacl/int8/quantize.h"

namespace mindspore::lite::micro {
@@ -35,6 +36,11 @@ inline std::ostream &operator<<(std::ostream &code, const OpParameter &tile) {
return code;
}

// Streams an AddQuantQrgs as a C brace initializer:
// {zp, left_shift, right_shift, multiplier}.
inline std::ostream &operator<<(std::ostream &code, const AddQuantQrgs &args) {
  const char *sep = ", ";
  code << "{" << args.zp_ << sep << args.left_shift_ << sep << args.right_shift_ << sep << args.multiplier_ << "}";
  return code;
}

inline std::ostream &operator<<(std::ostream &code, PoolMode pool_mode) {
code << "(PoolMode)"
<< "(" << static_cast<int>(pool_mode) << ")";


+ 69
- 0
mindspore/lite/micro/wrapper/int8/add_int8_wrapper.c View File

@@ -0,0 +1,69 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "wrapper/int8/add_int8_wrapper.h"
#include "nnacl/errorcode.h"

int AddBroadcastRun(void *cdata, int task_id) {
AddArgs *args = (AddArgs *)(cdata);
int stride = UP_DIV(args->out_size_, args->thread_count_);
int real_out_count = MSMIN(stride, args->out_size_ - stride * task_id);
if (real_out_count <= 0) {
return NNACL_OK;
}
int8_t *cur_in0 = NULL;
int8_t *cur_in1 = NULL;
int8_t *cur_out = NULL;
for (int i = 0; i < real_out_count; i++) {
if (args->arith_para_->in_elements_num0_ == args->arith_para_->out_elements_num_) {
cur_in0 = args->input0_data_ + task_id * stride * args->in_size_ + i * args->in_size_;
cur_in1 = args->input1_data_;
cur_out = args->output_data_ + task_id * stride * args->in_size_ + i * args->in_size_;
} else {
cur_in0 = args->input0_data_;
cur_in1 = args->input1_data_ + task_id * stride * args->in_size_ + i * args->in_size_;
cur_out = args->output_data_ + task_id * stride * args->in_size_ + i * args->in_size_;
}
AddInt8(cur_in0, cur_in1, cur_out, args->in_size_, &args->para_);
}
return NNACL_OK;
}

int AddRun(void *cdata, int task_id) {
AddArgs *args = (AddArgs *)(cdata);
/* no need broadcast */
int stride = UP_DIV(args->elements_num_, args->thread_count_);
int rest_count = args->elements_num_ - task_id * stride;
int real_count = MSMIN(stride, rest_count);
if (real_count <= 0) {
return NNACL_OK;
}
int8_t *cur_in0 = args->input0_data_ + stride * task_id;
int8_t *cur_in1 = args->input1_data_ + stride * task_id;
int8_t *cur_out = args->output_data_ + stride * task_id;
if (args->support_opt_add_) {
int8_t *ptr_in = args->arith_para_->in_elements_num0_ == 1 ? cur_in1 : cur_in0;
int8_t element_in = args->arith_para_->in_elements_num0_ == 1 ? args->input0_data_[0] : args->input1_data_[0];
AddQuantQrgs *ptr_args =
args->arith_para_->in_elements_num0_ == 1 ? &args->para_.in1_args_ : &args->para_.in0_args_;
AddQuantQrgs *ele_args =
args->arith_para_->in_elements_num0_ == 1 ? &args->para_.in0_args_ : &args->para_.in1_args_;
AddOptInt8(ptr_in, element_in, cur_out, rest_count, &args->para_, ptr_args, ele_args);
} else {
AddInt8(cur_in0, cur_in1, cur_out, rest_count, &args->para_);
}
return NNACL_OK;
}

+ 50
- 0
mindspore/lite/micro/wrapper/int8/add_int8_wrapper.h View File

@@ -0,0 +1,50 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_LITE_MICRO_WRAPPER_INT8_ADD_INT8_WRAPPER_H_
#define MINDSPORE_LITE_MICRO_WRAPPER_INT8_ADD_INT8_WRAPPER_H_
#include <string.h>
#include "nnacl/int8/matmul_int8.h"
#include "src/runtime/thread_pool.h"
#include "nnacl/int8/add_int8.h"
#include "nnacl/arithmetic.h"

#ifdef __cplusplus
extern "C" {
#endif

// Aggregated kernel arguments passed through ParallelLaunch's void *cdata to
// AddRun / AddBroadcastRun. Field order must stay in sync with the AddArgs
// initializer emitted by AddInt8Coder::DoCode.
typedef struct {
AddQuantParameter para_;        // fixed-point quantization args for both inputs and output
ArithmeticParameter *arith_para_;  // broadcast shapes/element counts (non-owning)
int in_size_;                   // contiguous inner-block length (broadcast path)
int out_size_;                  // number of outer blocks (broadcast path)
int thread_count_;              // tasks the work is partitioned across
int elements_num_;              // total output element count (element-wise path)
bool support_opt_add_;          // true when either operand is a single element
int8_t *input0_data_;           // first operand buffer
int8_t *input1_data_;           // second operand buffer
int8_t *output_data_;           // output buffer
} AddArgs;

// Per-task entry for the broadcast Add path.
int AddBroadcastRun(void *cdata, int task_id);

// Per-task entry for the element-wise (non-broadcast) Add path.
int AddRun(void *cdata, int task_id);

#ifdef __cplusplus
}
#endif

#endif // MINDSPORE_LITE_MICRO_WRAPPER_INT8_ADD_INT8_WRAPPER_H_

mindspore/lite/micro/wrapper/int8/conv1x1_init_int8.c → mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.c View File

@@ -14,7 +14,7 @@
* limitations under the License.
*/

#include "wrapper/int8/conv1x1_init_int8.h"
#include "wrapper/int8/conv1x1_init_int8_wrapper.h"
#include <memory.h>
#include "nnacl/int8/matmul_int8.h"
#include "nnacl/errorcode.h"

mindspore/lite/micro/wrapper/int8/conv1x1_init_int8.h → mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.h View File


mindspore/lite/micro/wrapper/int8/conv1x1_run_int8.c → mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.c View File

@@ -14,7 +14,7 @@
* limitations under the License.
*/

#include "wrapper/int8/conv1x1_run_int8.h"
#include "wrapper/int8/conv1x1_run_int8_wrapper.h"
#include "nnacl/base/conv1x1_base.h"
#include "nnacl/int8/matmul_int8.h"
#include "nnacl/int8/pack_int8.h"

mindspore/lite/micro/wrapper/int8/conv1x1_run_int8.h → mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.h View File


mindspore/lite/micro/wrapper/int8/conv_init_int8.c → mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.c View File

@@ -14,7 +14,7 @@
* limitations under the License.
*/

#include "wrapper/int8/conv_init_int8.h"
#include "wrapper/int8/conv_init_int8_wrapper.h"
#include <memory.h>
#include "nnacl/op_base.h"
#include "nnacl/int8/matmul_int8.h"

mindspore/lite/micro/wrapper/int8/conv_init_int8.h → mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.h View File


Loading…
Cancel
Save