Browse Source

Add `final` to coder classes, remove redundant `nnacl::` namespace qualifiers, and fix a wrong-parameter bug in ReduceInt8Coder

tags/v1.2.0-rc1
zhujingxuan 5 years ago
parent
commit
0aa9fc0483
50 changed files with 52 additions and 53 deletions
  1. +1
    -1
      mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h
  2. +1
    -1
      mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h
  3. +1
    -1
      mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h
  4. +1
    -1
      mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h
  5. +1
    -1
      mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h
  6. +1
    -1
      mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h
  7. +1
    -1
      mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h
  8. +1
    -1
      mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.h
  9. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.cc
  10. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.cc
  11. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.h
  12. +2
    -3
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.cc
  13. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h
  14. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.cc
  15. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.cc
  16. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h
  17. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.cc
  18. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.cc
  19. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.h
  20. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/expand_dims_fp32_coder.cc
  21. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/expand_dims_fp32_coder.h
  22. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.cc
  23. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.h
  24. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/nchw2nhwc_fp32_coder.cc
  25. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/nchw2nhwc_fp32_coder.h
  26. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/nhwc2nchw_fp32_coder.cc
  27. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/nhwc2nchw_fp32_coder.h
  28. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.cc
  29. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.h
  30. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.cc
  31. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.cc
  32. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.h
  33. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.h
  34. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/reshape_fp32_coder.cc
  35. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/reshape_fp32_coder.h
  36. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.cc
  37. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.cc
  38. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.h
  39. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/squeeze_dims_fp32_coder.cc
  40. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/squeeze_dims_fp32_coder.h
  41. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.cc
  42. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.h
  43. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.cc
  44. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.cc
  45. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h
  46. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.cc
  47. +2
    -2
      mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.cc
  48. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h
  49. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.cc
  50. +1
    -1
      mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.h

+ 1
- 1
mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h View File

@@ -23,7 +23,7 @@
#include "nnacl/int8/quant_dtype_cast_int8.h"

namespace mindspore::lite::micro {
class DTypeCastCoder : public OperatorCoder {
class DTypeCastCoder final : public OperatorCoder {
public:
DTypeCastCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h View File

@@ -23,7 +23,7 @@
#include "nnacl/int8/quant_dtype_cast_int8.h"

namespace mindspore::lite::micro {
class QuantDTypeCastCoder : public OperatorCoder {
class QuantDTypeCastCoder final : public OperatorCoder {
public:
QuantDTypeCastCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.h View File

@@ -22,7 +22,7 @@

namespace mindspore::lite::micro::cmsis {

class AddInt8Coder : public OperatorCoder {
class AddInt8Coder final : public OperatorCoder {
public:
AddInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h View File

@@ -24,7 +24,7 @@

namespace mindspore::lite::micro::cmsis {

class Conv2DInt8Coder : public Conv2DBaseCoder {
class Conv2DInt8Coder final : public Conv2DBaseCoder {
public:
explicit Conv2DInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h View File

@@ -22,7 +22,7 @@
#include "src/runtime/kernel/arm/int8/convolution_depthwise_int8.h"

namespace mindspore::lite::micro::cmsis {
class DWConvInt8Coder : public Conv2DBaseCoder {
class DWConvInt8Coder final : public Conv2DBaseCoder {
public:
DWConvInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h View File

@@ -23,7 +23,7 @@
#include "micro/coder/opcoders/base/full_connection_base_coder.h"
#include "nnacl/int8/quantize.h"
namespace mindspore::lite::micro::cmsis {
class FullConnectionInt8Coder : public FullConnectionBaseCoder {
class FullConnectionInt8Coder final : public FullConnectionBaseCoder {
public:
FullConnectionInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.h View File

@@ -21,7 +21,7 @@
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro::cmsis {
class MulInt8Coder : public OperatorCoder {
class MulInt8Coder final : public OperatorCoder {
public:
MulInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/reshape_int8_coder.h View File

@@ -21,7 +21,7 @@
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro::cmsis {
class ReshapeInt8Coder : public OperatorCoder {
class ReshapeInt8Coder final : public OperatorCoder {
public:
ReshapeInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.cc View File

@@ -38,7 +38,7 @@ int ActivationFP32Coder::DoCode(CoderContext *const context) {
} else {
Collect(context, {"nnacl/fp32/activation.h"}, {"activation.c"});
}
nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
switch (activation_parameter->type_) {
case schema::ActivationType_RELU:
code.CodeFunction("Fp32Relu", input_tensor_, count, output_tensor_);


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.cc View File

@@ -31,7 +31,7 @@ int AddNFP32Coder::DoCode(CoderContext *const context) {
std::string input0_str = allocator_->GetRuntimeAddr(input0);
std::string input1_str = allocator_->GetRuntimeAddr(input1);
Collect(context, {"nnacl/kernel/fp32/add_fp32_slim.h"}, {"add_fp32_slim.c"});
nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code.CodeFunction("ElementAdd", input0_str, input1_str, output_tensor_, elements_num);
if (input_tensors_.size() > 2) {
for (size_t i = 2; i < input_tensors_.size(); ++i) {


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.h View File

@@ -20,7 +20,7 @@
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro::nnacl {
class AddNFP32Coder : public OperatorCoder {
class AddNFP32Coder final : public OperatorCoder {
public:
AddNFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 2
- 3
mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.cc View File

@@ -123,8 +123,7 @@ int ArithmeticFP32Coder::Init(CoderContext *const context) {
}

int ArithmeticFP32Coder::BroadcastRun(const std::string &input0, const std::string &input1, const std::string &output,
int dim, int out_count, int out_thread_stride,
nnacl::NNaclFp32Serializer *const code) {
int dim, int out_count, int out_thread_stride, NNaclFp32Serializer *const code) {
if (dim > break_pos_) {
if (data_type_ == kDataTypeInt) {
*code << "\t\t" << arithmetic_run_int_ << "(((" << input0 << ") + " << out_thread_stride << "), ((" << input1
@@ -270,7 +269,7 @@ int ArithmeticFP32Coder::DoCode(CoderContext *const context) {
int stride = UP_DIV(element_num, thread_num_);
int count = MSMIN(stride, element_num - stride * task_id);
MS_CHECK_TRUE(!arithmetic_run_.empty(), "arithmetic_run function is nullptr!");
nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
/**
* for nnacl's operator combine all arithmetic to nnalc/arithmetic.c
* this solution is not suitable for micro, for the size of package.


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h View File

@@ -81,7 +81,7 @@ class ArithmeticFP32Coder final : public OperatorCoder {
int Init(CoderContext *const context);

int BroadcastRun(const std::string &input0, const std::string &input1, const std::string &output, int dim,
int out_count, int out_thread_stride, nnacl::NNaclFp32Serializer *const code);
int out_count, int out_thread_stride, NNaclFp32Serializer *const code);

int break_pos_{0};



+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.cc View File

@@ -67,7 +67,7 @@ int ArithmeticSelfFP32Coder::DoCode(CoderContext *const context) {
MS_CHECK_TRUE(!arithmetic_self_run_.empty(), "arithmetic_run function is nullptr!");

Collect(context, {"nnacl/arithmetic_common.h", "nnacl/fp32/arithmetic_self.h"}, {"nnacl/fp32/arithmetic_self.c"});
nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code.CodeFunction(arithmetic_self_run_, input_tensor_, output_tensor_, size);

MS_LOG(DEBUG) << "ArithmeticSelfFP32Coder has been called";


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.cc View File

@@ -34,7 +34,7 @@ int AssignAddFP32Coder::DoCode(CoderContext *const context) {
return RET_ERROR;
}

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
// Get Tensor Pointer
std::string input0_str = allocator_->GetRuntimeAddr(input0);
std::string input1_str = allocator_->GetRuntimeAddr(input1);


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h View File

@@ -22,7 +22,7 @@
#include "nnacl/base/tile_base.h"

namespace mindspore::lite::micro::nnacl {
class AssignAddFP32Coder : public OperatorCoder {
class AssignAddFP32Coder final : public OperatorCoder {
public:
AssignAddFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.cc View File

@@ -57,7 +57,7 @@ int BatchnormFP32Coder::DoCode(CoderContext *const context) {
Tensor *mean_tensor = input_tensors_.at(1);
Tensor *var_tensor = input_tensors_.at(2);
Collect(context, {"nnacl/fp32/batchnorm.h"}, {"nnacl/fp32/batchnorm.c"});
nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code.CodeStruct("bn_parameter", *bn_parameter);
code.CodeFunction("BatchNorm", output_tensor_, input_tensor_, mean_tensor, var_tensor, task_id, "&bn_parameter");
MS_LOG(INFO) << "BatchnormFP32Code has been called";


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.cc View File

@@ -39,7 +39,7 @@ int ConcatFP32Coder::DoCode(CoderContext *const context) {

size_t input_num = input_tensors_.size();

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code << "\t\tvoid *inputs_addr[] = {";
for (size_t i = 0; i < input_num; ++i) {
code << allocator_->GetRuntimeAddr(input_tensors_.at(i)) << ", ";


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/concat_fp32_coder.h View File

@@ -22,7 +22,7 @@
#include "nnacl/concat_parameter.h"

namespace mindspore::lite::micro::nnacl {
class ConcatFP32Coder : public OperatorCoder {
class ConcatFP32Coder final : public OperatorCoder {
public:
ConcatFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/expand_dims_fp32_coder.cc View File

@@ -35,7 +35,7 @@ int ExpandDimsFP32Coder::ReSize() {
int ExpandDimsFP32Coder::DoCode(CoderContext *const context) {
// generate code .h .c
Collect(context, {"nnacl/fp32/expandDims.h"}, {"nnacl/fp32/expandDims.c"});
nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
int task_id = 0;
size_t size = MSMIN(thread_sz_stride_, static_cast<int>(data_size_ - task_id * thread_sz_stride_));
if (!size) {


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/expand_dims_fp32_coder.h View File

@@ -21,7 +21,7 @@
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro::nnacl {
class ExpandDimsFP32Coder : public OperatorCoder {
class ExpandDimsFP32Coder final : public OperatorCoder {
public:
ExpandDimsFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.cc View File

@@ -34,7 +34,7 @@ int GatherFP32Coder::DoCode(CoderContext *context) {
// generate code .h .c
Collect(context, {"nnacl/fp32/gather.h"}, {"nnacl/fp32/gather.c"});

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
std::vector<int> in_shape = input0->shape();
int in_rank = in_shape.size();
int indices_element_size = input1->ElementsNum();


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/gather_fp32_coder.h View File

@@ -22,7 +22,7 @@
#include "nnacl/base/tile_base.h"

namespace mindspore::lite::micro::nnacl {
class GatherFP32Coder : public OperatorCoder {
class GatherFP32Coder final : public OperatorCoder {
public:
GatherFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/nchw2nhwc_fp32_coder.cc View File

@@ -28,7 +28,7 @@ int Nchw2NhwcFP32Coder::Prepare(CoderContext *const context) { return RET_OK; }
int Nchw2NhwcFP32Coder::DoCode(CoderContext *context) {
// generate code .h .c
Collect(context, {"nnacl/pack.h"}, {"nnacl/pack.c"});
nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
if (input_tensor_->shape().size() == 4) {
if (input_tensor_->data_type() == kNumberTypeFloat32) {
code.CodeFunction("PackNCHWToNHWCFp32", input_tensor_, output_tensor_, output_tensor_->Batch(),


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/nchw2nhwc_fp32_coder.h View File

@@ -23,7 +23,7 @@
#include "nnacl/base/tile_base.h"

namespace mindspore::lite::micro::nnacl {
class Nchw2NhwcFP32Coder : public OperatorCoder {
class Nchw2NhwcFP32Coder final : public OperatorCoder {
public:
Nchw2NhwcFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/nhwc2nchw_fp32_coder.cc View File

@@ -27,7 +27,7 @@ int Nhwc2NchwFP32Coder::DoCode(CoderContext *const context) {
// generate code .h .c
Collect(context, {"nnacl/pack.h"}, {"pack.c"});

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
if (input_tensor_->shape().size() == 4) {
if (input_tensor_->data_type() == kNumberTypeFloat32) {
code.CodeFunction("PackNHWCToNCHWFp32", input_tensor_, output_tensor_, output_tensor_->Batch(),


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/nhwc2nchw_fp32_coder.h View File

@@ -22,7 +22,7 @@
#include "nnacl/base/tile_base.h"

namespace mindspore::lite::micro::nnacl {
class Nhwc2NchwFP32Coder : public OperatorCoder {
class Nhwc2NchwFP32Coder final : public OperatorCoder {
public:
Nhwc2NchwFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.cc View File

@@ -82,7 +82,7 @@ int PadFP32Coder::DoCode(CoderContext *const context) {
int task_id = thread_num_ - 1;
Collect(context, {"nnacl/fp32/pad.h", "nnacl/pad_parameter.h"}, {"nnacl/fp32/pad.c"});

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code.CodeArray("in_", in_, DEFAULT_PAD_NDIMS);
code.CodeArray("out_", out_, DEFAULT_PAD_NDIMS);
code.CodeArray("padding_", pad_param_->paddings_, MAX_PAD_SIZE);


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.h View File

@@ -22,7 +22,7 @@
#include "nnacl/fp32/pad_fp32.h"

namespace mindspore::lite::micro::nnacl {
class PadFP32Coder : public OperatorCoder {
class PadFP32Coder final : public OperatorCoder {
public:
PadFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.cc View File

@@ -41,7 +41,7 @@ int PoolingFP32Coder::DoCode(CoderContext *const context) {

pooling_parameter->thread_num_ = pooling_parameter->op_parameter_.thread_num_;

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code.CodeStruct("pooling_parameter", *pooling_parameter);
float minf = -FLT_MAX;
float maxf = FLT_MAX;


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.cc View File

@@ -49,7 +49,7 @@ int PowerFP32Coder::DoCode(CoderContext *const context) {
}
// generate code .h .c
Collect(context, {"nnacl/power.h"}, {"power.c"});
nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code.CodeFunction("Power", input_tensor_, cur_exp_str, output_tensor_, len, scale_, shift_, broadcast);
context->AppendCode(code.str());
return RET_OK;


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.h View File

@@ -22,7 +22,7 @@
#include "nnacl/power_parameter.h"

namespace mindspore::lite::micro::nnacl {
class PowerFP32Coder : public OperatorCoder {
class PowerFP32Coder final : public OperatorCoder {
public:
PowerFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.h View File

@@ -23,7 +23,7 @@
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro::nnacl {
class ReduceFP32Coder : public ReduceBaseCoder {
class ReduceFP32Coder final : public ReduceBaseCoder {
public:
ReduceFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/reshape_fp32_coder.cc View File

@@ -27,7 +27,7 @@ int ReshapeFP32Coder::DoCode(CoderContext *const context) {

Collect(context, {"nnacl/reshape.h"}, {"reshape.c"});

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code.CodeFunction("Reshape", input_tensor_, output_tensor_, data_size);
context->AppendCode(code.str());
return RET_OK;


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/reshape_fp32_coder.h View File

@@ -21,7 +21,7 @@
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro::nnacl {
class ReshapeFP32Coder : public OperatorCoder {
class ReshapeFP32Coder final : public OperatorCoder {
public:
ReshapeFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.cc View File

@@ -128,7 +128,7 @@ int ScaleFP32Coder::DoCode(CoderContext *const context) {
MS_CHECK_PTR(offset_tensor);
Collect(context, {"nnacl/scale.h", "nnacl/fp32/scale.h", "nnacl/quantization/quantize.h"}, {"scale.c"});

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code.CodeStruct("scale_parameter", *scale_param_);

if (thread_num_ > 1) {


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.cc View File

@@ -34,7 +34,7 @@ int SliceFP32Coder::DoCode(CoderContext *const context) {
std::vector<int> begin = primitive_slice->GetPostProcessBegin();
std::vector<int> size = primitive_slice->GetPostProcessSize();
std::vector<int> input_shape = input_tensor_->shape();
nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
for (int i = 0; i < param->param_length_; i++) {
param->shape_[i] = input_shape.at(i);
}


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.h View File

@@ -21,7 +21,7 @@
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro::nnacl {
class SliceFP32Coder : public OperatorCoder {
class SliceFP32Coder final : public OperatorCoder {
public:
SliceFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/squeeze_dims_fp32_coder.cc View File

@@ -28,7 +28,7 @@ int SqueezeFP32Coder::DoCode(CoderContext *const context) {
// generate code .h .c
Collect(context, {"nnacl/squeeze.h"}, {"nnacl/squeeze.c"});

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;

// call the op function
if (input_tensor_->data_type() == kNumberTypeInt32) {


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/squeeze_dims_fp32_coder.h View File

@@ -21,7 +21,7 @@
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro::nnacl {
class SqueezeFP32Coder : public OperatorCoder {
class SqueezeFP32Coder final : public OperatorCoder {
public:
SqueezeFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.cc View File

@@ -53,7 +53,7 @@ int TileFP32Coder::DoCode(CoderContext *const context) {
// generate code .h .c
Collect(context, {"nnacl/fp32/tile.h"}, {"nnacl/fp32/tile.c"});

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;

code.CodeStruct("tile_parameter", *tile_param_);
// call the op function


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.h View File

@@ -22,7 +22,7 @@
#include "nnacl/base/tile_base.h"

namespace mindspore::lite::micro::nnacl {
class TileFP32Coder : public OperatorCoder {
class TileFP32Coder final : public OperatorCoder {
public:
TileFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.cc View File

@@ -80,7 +80,7 @@ int TransposeFp32Coder::DoCode(CoderContext *const context) {

Collect(context, {"nnacl/transpose.h", "nnacl/fp32/transpose.h", "nnacl/errorcode.h"}, {"transpose.c"});

nnacl::NNaclFp32Serializer code;
NNaclFp32Serializer code;
code.CodeStruct("transpose_parameter", *transpose_parameter_);

code.CodeFunction("DoTransposeFp32", input_tensor_, output_tensor_, in_shape_, out_shape_, "&transpose_parameter",


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.cc View File

@@ -85,7 +85,7 @@ int ConcatInt8Coder::DoCode(CoderContext *const context) {
concat_param_->count_unit_ = count_unit_;

Collect(context, {"nnacl/int8/concat_int8.h"}, {"concat_int8.c"});
nnacl::NNaclInt8Serializer code;
NNaclInt8Serializer code;

int in_tensor_count = input_tensors().size();
code << "int8_t *input_data[" << in_tensor_count << "];\n";


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h View File

@@ -23,7 +23,7 @@
#include "nnacl/int8/concat_int8.h"

namespace mindspore::lite::micro::nnacl {
class ConcatInt8Coder : public OperatorCoder {
class ConcatInt8Coder final : public OperatorCoder {
public:
ConcatInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.cc View File

@@ -49,7 +49,7 @@ int PoolingInt8Coder::DoCode(CoderContext *const context) {
std::vector<QuantArg> in_quant_args = in_tensor->quant_params();
std::vector<QuantArg> out_quant_args = out_tensor->quant_params();
Collect(context, {"nnacl/int8/pooling_int8.h", "nnacl/errorcode.h"}, {"pooling_int8.c"});
nnacl::NNaclInt8Serializer code;
NNaclInt8Serializer code;
code.precision(kPrecision);
// code op parameter
::QuantArg quant_arg_in = {static_cast<float>(in_quant_args.at(0).scale), in_quant_args.at(0).zeroPoint};


+ 2
- 2
mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.cc View File

@@ -221,8 +221,8 @@ int ReduceInt8Coder::DoCode(CoderContext *const context) {
code.CodeFunction(reducer_, outer_size_, inner_size_, axis_size_, begin_src_data_src, dst_addr, ptr_quan_arg_i,
task_id, thread_num_);
} else {
code.CodeFunction(last_reducer_, outer_size_, inner_size_, axis_size_, src_addr, dst_addr, ptr_quan_arg_i,
task_id, thread_num_);
code.CodeFunction(last_reducer_, outer_size_, inner_size_, axis_size_, begin_src_data_src, dst_addr,
ptr_quan_arg_i, task_id, thread_num_);
}
begin_src_data_src = dst_addr;
}


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h View File

@@ -24,7 +24,7 @@
#include "nnacl/int8/reduce_int8.h"
#include "micro/coder/opcoders/base/reduce_base_coder.h"
namespace mindspore::lite::micro::nnacl {
class ReduceInt8Coder : public ReduceBaseCoder {
class ReduceInt8Coder final : public ReduceBaseCoder {
public:
ReduceInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.cc View File

@@ -35,7 +35,7 @@ int ReshapeInt8Coder::DoCode(CoderContext *const context) {
std::vector<QuantArg> output_quant_args = output->quant_params();

Collect(context, {"nnacl/int8/reshape_int8.h"}, {"reshape_int8.c"});
nnacl::NNaclInt8Serializer code;
NNaclInt8Serializer code;
code.precision(kPrecision);
ReshapeQuantArg reshape_quant_arg = {
{static_cast<float>(input_quant_args.at(0).scale), input_quant_args.at(0).zeroPoint},


+ 1
- 1
mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.h View File

@@ -21,7 +21,7 @@
#include "micro/coder/opcoders/op_coder.h"

namespace mindspore::lite::micro::nnacl {
class ReshapeInt8Coder : public OperatorCoder {
class ReshapeInt8Coder final : public OperatorCoder {
public:
ReshapeInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
const Model::Node *node, size_t node_index, Target target)


Loading…
Cancel
Save