
!5715 fix reverse_sequence parser tflite and remove redundant code

Merge pull request !5715 from 徐安越/master
tags/v1.0.0
mindspore-ci-bot committed 5 years ago
commit 9ffc2e30b5
31 changed files with 8 additions and 505 deletions
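Taken as a whole, the patch does two things: the TFLite parsers for ReverseSequence and SparseToDense stop copying tensor data into schema attributes (seqLengths, outputShape, sparseValue and defaultValue are deleted from ops.fbs, the ops classes and the parser tests; seq_lengths is instead wired through as the op's second input, and validateIndices is threaded into the runtime SparseToDenseParameter), and the remaining files shed commented-out or #if 0 dead code.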
 1. +0 -1    mindspore/lite/java/java/app/src/main/native/runtime/model.cpp
 2. +0 -1    mindspore/lite/nnacl/fp32_grad/gemm.c
 3. +0 -12   mindspore/lite/nnacl/fp32_grad/pooling_grad.c
 4. +0 -16   mindspore/lite/nnacl/quantization/quantize.h
 5. +1 -0    mindspore/lite/nnacl/sparse_to_dense.h
 6. +1 -5    mindspore/lite/schema/ops.fbs
 7. +0 -40   mindspore/lite/src/common/file_utils.cc
 8. +1 -17   mindspore/lite/src/ops/reverse_sequence.cc
 9. +0 -2    mindspore/lite/src/ops/reverse_sequence.h
10. +1 -49   mindspore/lite/src/ops/sparse_to_dense.cc
11. +2 -0    mindspore/lite/src/populate_parameter.cc
12. +0 -5    mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc
13. +0 -4    mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc
14. +0 -5    mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc
15. +0 -5    mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc
16. +0 -5    mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc
17. +0 -2    mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
18. +0 -1    mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
19. +0 -5    mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
20. +0 -107  mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc
21. +0 -3    mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h
22. +0 -6    mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc
23. +0 -12   mindspore/lite/src/runtime/opencl/opencl_wrapper.cc
24. +0 -3    mindspore/lite/src/runtime/thread_pool.c
25. +0 -2    mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc
26. +0 -6    mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc
27. +0 -128  mindspore/lite/tools/anf_importer/anf_importer.cc
28. +0 -11   mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc
29. +0 -33   mindspore/lite/tools/anf_importer/import_from_protobuf.h
30. +2 -5    mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc
31. +0 -14   mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc

+0 -1  mindspore/lite/java/java/app/src/main/native/runtime/model.cpp

@@ -31,7 +31,6 @@ extern "C" JNIEXPORT jlong JNICALL Java_com_mindspore_lite_Model_loadModel(JNIEn
 
 MS_LOGD("Start Loading model");
 auto model = mindspore::lite::Model::Import(model_buffer, buffer_len);
-// env->DeleteLocalRef(*(jobject *)model_buffer);
 if (model == nullptr) {
 MS_LOGE("Import model failed");
 return reinterpret_cast<jlong>(nullptr);


+0 -1  mindspore/lite/nnacl/fp32_grad/gemm.c

@@ -77,7 +77,6 @@ static void gemm_tt(int M, int N, int K, float alpha, float *mat_a, int lda, flo
 
 void gemm(int transpose_a, int transpose_b, int M, int N, int K, float alpha, float *mat_a, int lda, float *mat_b,
 int ldb, float beta, float *mat_c, int ldc) {
-// printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
 if (beta >= 0.f && beta <= 0.f) {
 for (int i = 0; i < M; ++i) {
 for (int j = 0; j < N; ++j) {


+0 -12  mindspore/lite/nnacl/fp32_grad/pooling_grad.c

@@ -33,15 +33,9 @@ void AvgPoolingGrad(const float *input_ptr, float *output_ptr, PoolingParameter
 const float *inPtr = NULL;
 for (int i = 0; i < output_h * output_w * channel * output_batch; i++) output_ptr[i] = 0.0;
 
-// int pad_top = padding[2];
-
 float kk = (float)(win_h * win_w);
 
 for (uint16_t ib = 0; ib < output_batch; ib++) {
-// int in_batch_offset = batch * in_h * in_w * channel;
-// int out_batch_offset = batch * output_h * output_w * channel;
-// out = grads->getData(ib*grads->imgSize());
-// inPtr = in->getData(ib*in->imgSize());
 float *out;
 out = &output_ptr[(ib * output_h * output_w)];
 inPtr = (float *)(&input_ptr[(ib * in_h * in_w)]);
@@ -97,12 +91,6 @@ void AvgPoolingGrad(const float *input_ptr, float *output_ptr, PoolingParameter
 }
 
 void MaxPoolingGrad(const float *dy, const int *indices, float *output_ptr, PoolingParameter *pooling_param) {
-// int stride_w = pooling_param->stride_w_;
-// int stride_h = pooling_param->stride_h_;
-// int pad_w = pooling_param->pad_l_;
-// int pad_h = pooling_param->pad_u_;
-// int win_w = pooling_param->window_w_;
-// int win_h = pooling_param->window_h_;
 int channel = pooling_param->input_channel_;
 int in_w = pooling_param->input_w_;
 int in_h = pooling_param->input_h_;


+0 -16  mindspore/lite/nnacl/quantization/quantize.h

@@ -99,22 +99,6 @@ typedef struct PreluQuantArg {
 QuantArg out_quant_args_;
 } PreluQuantArg;
 
-/*typedef struct SigmoidQuantArg {
-int *input_sizes_;
-int output_size_;
-int **input_shapes_;
-int *output_shape_;
-size_t input_num_;
-size_t output_dim_;
-float alpha_;
-QuantArg in_args_;
-QuantArg out_args_;
-int output_activation_min_;
-int output_activation_max_;
-QuantArg *in_quant_args_;
-QuantArg out_quant_args_;
-} SigmoidQuantArg;*/
-
 typedef struct MatmulQuantArg {
 QuantArg input;
 QuantArg weight;


+1 -0  mindspore/lite/nnacl/sparse_to_dense.h

@@ -20,6 +20,7 @@
 
 typedef struct SparseToDenseParameter {
 OpParameter op_parameter_;
+bool validate_indices_;
 int thread_num_;
 int count_;
 } SparseToDenseParameter;


+1 -5  mindspore/lite/schema/ops.fbs

@@ -703,16 +703,12 @@ table SpaceToBatch {
 }
 
 table SparseToDense {
-outputShape: [int];
-sparseValue: [int];
-defaultValue: [int];
 validateIndices: bool;
 }
 
 table ReverseSequence {
 seqAxis: int;
 batchAxis: int;
-seqLengths: [int];
 }
 
 table Rank {
@@ -904,4 +900,4 @@ table Proposal {
 
 table Custom {
 custom : [ubyte];
-}
+}
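Net effect on the schema (a consolidation of the two hunks above; the trailing -}/+} pair is consistent with a newline-at-end-of-file fix): the array-valued attributes are gone, so those values now travel as runtime input tensors instead of converter-time constants, and the tables reduce to

    table SparseToDense {
        validateIndices: bool;
    }

    table ReverseSequence {
        seqAxis: int;
        batchAxis: int;
    }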

+0 -40  mindspore/lite/src/common/file_utils.cc

@@ -125,45 +125,5 @@ void CompareOutput(float *output_data, std::string file_path) {
 printf("output num : %zu\n", output_num);
 CompareOutputData(output_data, ground_truth, output_num);
 }
-
-// std::string GetAndroidPackageName() {
-// static std::string packageName;
-//
-// if (!packageName.empty()) {
-// return packageName;
-// }
-//
-// char cmdline[MAX_FILENAME_LEN] = {0};
-// int fd = open("/proc/self/cmdline", O_RDONLY);
-//
-// if (fd >= 0) {
-// char ch;
-// int i = 0;
-// while (read(fd, &ch, sizeof(ch)) > 0 && !isspace(ch)) {
-// if (':' == ch) {
-// break;
-// }
-//
-// if (('/' == ch) || ('\\' == ch)) {
-// (void)memset(cmdline, 0, sizeof(cmdline));
-// i = 0;
-// } else {
-// cmdline[i] = ch;
-// i++;
-// }
-// }
-// close(fd);
-// }
-// packageName = std::string(cmdline);
-// return packageName;
-//}
-
-// std::string GetAndroidPackagePath() {
-// std::string packageName = GetAndroidPackageName();
-// if (packageName.empty()) {
-// return "./";
-// }
-// return "/data/data/" + packageName + '/';
-//}
 } // namespace lite
 } // namespace mindspore

+1 -17  mindspore/lite/src/ops/reverse_sequence.cc

@@ -21,26 +21,16 @@ namespace lite {
 #ifdef PRIMITIVE_WRITEABLE
 int ReverseSequence::GetSeqAxis() const { return this->primitive_->value.AsReverseSequence()->seqAxis; }
 int ReverseSequence::GetBatchAxis() const { return this->primitive_->value.AsReverseSequence()->batchAxis; }
-std::vector<int> ReverseSequence::GetSeqLengths() const {
-return this->primitive_->value.AsReverseSequence()->seqLengths;
-}
 
 void ReverseSequence::SetSeqAxis(int seq_axis) { this->primitive_->value.AsReverseSequence()->seqAxis = seq_axis; }
 void ReverseSequence::SetBatchAxis(int batch_axis) {
 this->primitive_->value.AsReverseSequence()->batchAxis = batch_axis;
 }
-void ReverseSequence::SetSeqLengths(const std::vector<int> &seq_lengths) {
-this->primitive_->value.AsReverseSequence()->seqLengths = seq_lengths;
-}
 
 #else
 
 int ReverseSequence::GetSeqAxis() const { return this->primitive_->value_as_ReverseSequence()->seqAxis(); }
 int ReverseSequence::GetBatchAxis() const { return this->primitive_->value_as_ReverseSequence()->batchAxis(); }
-std::vector<int> ReverseSequence::GetSeqLengths() const {
-auto fb_vector = this->primitive_->value_as_ReverseSequence()->seqLengths();
-return std::vector<int>(fb_vector->begin(), fb_vector->end());
-}
 int ReverseSequence::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
 MS_ASSERT(nullptr != primitive);
 MS_ASSERT(nullptr != fbb);
@@ -50,13 +40,7 @@ int ReverseSequence::UnPackToFlatBuilder(const schema::Primitive *primitive, fla
 MS_LOG(ERROR) << "value_as_ReverseSequence return nullptr";
 return RET_ERROR;
 }
-std::vector<int32_t> seqLengths;
-if (attr->seqLengths() != nullptr) {
-for (int i = 0; i < static_cast<int>(attr->seqLengths()->size()); i++) {
-seqLengths.push_back(attr->seqLengths()->data()[i]);
-}
-}
-auto val_offset = schema::CreateReverseSequenceDirect(*fbb, attr->seqAxis(), attr->batchAxis(), &seqLengths);
+auto val_offset = schema::CreateReverseSequence(*fbb, attr->seqAxis(), attr->batchAxis());
 auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ReverseSequence, val_offset.o);
 fbb->Finish(prim_offset);
 return RET_OK;
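Reassembled from the hunk above, the re-pack now carries only the two axes. In this sketch the attr lookup line is inferred from the logged error rather than quoted from the patch:

    int ReverseSequence::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
      MS_ASSERT(nullptr != primitive);
      MS_ASSERT(nullptr != fbb);
      auto attr = primitive->value_as_ReverseSequence();  // inferred, not shown in the hunk
      if (attr == nullptr) {
        MS_LOG(ERROR) << "value_as_ReverseSequence return nullptr";
        return RET_ERROR;
      }
      // seqLengths is no longer serialized here; it now lives in an input tensor
      auto val_offset = schema::CreateReverseSequence(*fbb, attr->seqAxis(), attr->batchAxis());
      auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ReverseSequence, val_offset.o);
      fbb->Finish(prim_offset);
      return RET_OK;
    }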


+0 -2  mindspore/lite/src/ops/reverse_sequence.h

@@ -34,7 +34,6 @@ class ReverseSequence : public PrimitiveC {
 explicit ReverseSequence(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
 void SetSeqAxis(int seq_axis);
 void SetBatchAxis(int batch_axis);
-void SetSeqLengths(const std::vector<int> &seq_lengths);
 #else
 ReverseSequence() = default;
 
@@ -43,7 +42,6 @@ class ReverseSequence : public PrimitiveC {
 int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
 int GetSeqAxis() const;
 int GetBatchAxis() const;
-std::vector<int> GetSeqLengths() const;
 };
 } // namespace lite
 } // namespace mindspore


+1 -49  mindspore/lite/src/ops/sparse_to_dense.cc

@@ -19,44 +19,14 @@
 namespace mindspore {
 namespace lite {
 #ifdef PRIMITIVE_WRITEABLE
-std::vector<int> SparseToDense::GetOutputShape() const {
-return this->primitive_->value.AsSparseToDense()->outputShape;
-}
-std::vector<int> SparseToDense::GetSparseValue() const {
-return this->primitive_->value.AsSparseToDense()->sparseValue;
-}
-std::vector<int> SparseToDense::GetDefaultValue() const {
-return this->primitive_->value.AsSparseToDense()->defaultValue;
-}
 bool SparseToDense::GetValidateIndices() const { return this->primitive_->value.AsSparseToDense()->validateIndices; }
 
-void SparseToDense::SetOutputShape(const std::vector<int> &output_shape) {
-this->primitive_->value.AsSparseToDense()->outputShape = output_shape;
-}
-void SparseToDense::SetSparseValue(const std::vector<int> &sparse_value) {
-this->primitive_->value.AsSparseToDense()->sparseValue = sparse_value;
-}
-void SparseToDense::SetDefaultValue(const std::vector<int> &default_value) {
-this->primitive_->value.AsSparseToDense()->defaultValue = default_value;
-}
 void SparseToDense::SetValidateIndices(bool validate_indices) {
 this->primitive_->value.AsSparseToDense()->validateIndices = validate_indices;
 }
 
 #else
 
-std::vector<int> SparseToDense::GetOutputShape() const {
-auto fb_vector = this->primitive_->value_as_SparseToDense()->outputShape();
-return std::vector<int>(fb_vector->begin(), fb_vector->end());
-}
-std::vector<int> SparseToDense::GetSparseValue() const {
-auto fb_vector = this->primitive_->value_as_SparseToDense()->sparseValue();
-return std::vector<int>(fb_vector->begin(), fb_vector->end());
-}
-std::vector<int> SparseToDense::GetDefaultValue() const {
-auto fb_vector = this->primitive_->value_as_SparseToDense()->defaultValue();
-return std::vector<int>(fb_vector->begin(), fb_vector->end());
-}
 bool SparseToDense::GetValidateIndices() const { return this->primitive_->value_as_SparseToDense()->validateIndices(); }
 int SparseToDense::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
 MS_ASSERT(nullptr != primitive);
@@ -66,25 +36,7 @@ int SparseToDense::UnPackToFlatBuilder(const schema::Primitive *primitive, flatb
 MS_LOG(ERROR) << "value_as_SparseToDense return nullptr";
 return RET_ERROR;
 }
-std::vector<int32_t> outputShape;
-if (attr->outputShape() != nullptr) {
-for (int i = 0; i < static_cast<int>(attr->outputShape()->size()); i++) {
-outputShape.push_back(attr->outputShape()->data()[i]);
-}
-}
-std::vector<int32_t> sparseValue;
-if (attr->sparseValue() != nullptr) {
-for (int i = 0; i < static_cast<int>(attr->sparseValue()->size()); i++) {
-sparseValue.push_back(attr->sparseValue()->data()[i]);
-}
-}
-std::vector<int32_t> defaultValue;
-if (attr->defaultValue() != nullptr) {
-for (int i = 0; i < static_cast<int>(attr->defaultValue()->size()); i++) {
-defaultValue.push_back(attr->defaultValue()->data()[i]);
-}
-}
-auto val_offset = schema::CreateSparseToDenseDirect(*fbb, &outputShape, &sparseValue, &defaultValue);
+auto val_offset = schema::CreateSparseToDense(*fbb, attr->validateIndices());
 auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SparseToDense, val_offset.o);
 fbb->Finish(prim_offset);
 return RET_OK;


+2 -0  mindspore/lite/src/populate_parameter.cc

@@ -1031,6 +1031,8 @@ OpParameter *PopulateSparseToDenseParameter(const mindspore::lite::PrimitiveC *p
 }
 memset(sparse_to_dense_param, 0, sizeof(SparseToDenseParameter));
 sparse_to_dense_param->op_parameter_.type_ = primitive->Type();
+auto param = reinterpret_cast<mindspore::lite::SparseToDense *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
+sparse_to_dense_param->validate_indices_ = param->GetValidateIndices();
 return reinterpret_cast<OpParameter *>(sparse_to_dense_param);
 }
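Read together with the new validate_indices_ field in SparseToDenseParameter, the populate routine now forwards the one surviving schema flag into the kernel parameter. A sketch of the whole function; the allocation prologue sits above the hunk and is assumed here, modeled on the file's other Populate* helpers:

    OpParameter *PopulateSparseToDenseParameter(const mindspore::lite::PrimitiveC *primitive) {
      // assumed prologue (not part of the patch text)
      auto *sparse_to_dense_param = reinterpret_cast<SparseToDenseParameter *>(malloc(sizeof(SparseToDenseParameter)));
      if (sparse_to_dense_param == nullptr) {
        return nullptr;
      }
      memset(sparse_to_dense_param, 0, sizeof(SparseToDenseParameter));
      sparse_to_dense_param->op_parameter_.type_ = primitive->Type();
      // added by this patch: copy validateIndices into the runtime parameter
      auto param = reinterpret_cast<mindspore::lite::SparseToDense *>(const_cast<mindspore::lite::PrimitiveC *>(primitive));
      sparse_to_dense_param->validate_indices_ = param->GetValidateIndices();
      return reinterpret_cast<OpParameter *>(sparse_to_dense_param);
    }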




+0 -5  mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc

@@ -250,11 +250,6 @@ int ConvolutionBaseCPUKernel::SetOutputTensorQuantParam() {
 } else {
 MS_LOG(ERROR) << "Not Support Per Channel for input now.";
 return RET_ERROR;
-// auto output_quant_arg = output_tensor->GetQuantParams();
-// for (int i = 0; i < out_arg_num; ++i) {
-// conv_quant_arg_->output_quant_args_[i].zp_ = output_quant_arg[i].zeroPoint;
-// conv_quant_arg_->output_quant_args_[i].scale_ = output_quant_arg[i].scale;
-// }
 }
 return RET_OK;
 }


+0 -4  mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc

@@ -230,10 +230,6 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::tensor::Ten
 conv_param->input_w_ = inputs.front()->Width();
 conv_param->output_h_ = outputs.front()->Height();
 conv_param->output_w_ = outputs.front()->Width();
-// bool prefer_flag = false;
-// if (conv_param->output_h_ * conv_param->output_w_ > 64) {
-// prefer_flag = true;
-// }
 
 kernel::LiteKernel *kernel = nullptr;
 if (kernel_h == 3 && kernel_w == 3 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1) {


+0 -5  mindspore/lite/src/runtime/kernel/arm/fp32/convolution.cc

@@ -45,13 +45,8 @@ int ConvolutionCPUKernel::InitWeightBias() {
 int ic4 = UP_DIV(in_channel, C4NUM);
 int kernel_plane = kernel_h * kernel_w;
 int oc_block, oc_block_num;
-// #ifdef ENABLE_ARM32
-// oc_block = C4NUM;
-// oc_block_num = UP_DIV(out_channel, C4NUM);
-// #else
 oc_block = C8NUM;
 oc_block_num = UP_DIV(out_channel, C8NUM);
-// #endif
 int pack_weight_size = oc_block_num * oc_block * ic4 * C4NUM * kernel_plane;
 
 auto origin_weight = reinterpret_cast<float *>(filter_tensor->Data());


+0 -5  mindspore/lite/src/runtime/kernel/arm/fp32/convolution_3x3.cc

@@ -57,13 +57,8 @@ int Convolution3x3CPUKernel::InitWeightBias() {
 int iC4 = UP_DIV(input_channel, C4NUM);
 int oC4 = UP_DIV(output_channel, C4NUM);
 int oc_block, oc_block_num;
-// #ifdef ENABLE_ARM32
-// oc_block = C4NUM;
-// oc_block_num = UP_DIV(output_channel, C4NUM);
-// #else
 oc_block = C8NUM;
 oc_block_num = UP_DIV(output_channel, C8NUM);
-// #endif
 const int k_plane = 16;
 // init weight
 size_t transformed_size = iC4 * C4NUM * oc_block_num * oc_block * k_plane * sizeof(float);


+0 -5  mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd.cc

@@ -115,13 +115,8 @@ int ConvolutionWinogradCPUKernel::InitWeightBias() {
 
 int oc4 = UP_DIV(out_channel, C4NUM);
 int oc_block, oc_block_num;
-// #ifdef ENABLE_ARM32
-// oc_block = C4NUM;
-// oc_block_num = UP_DIV(output_channel, C4NUM);
-// #else
 oc_block = C8NUM;
 oc_block_num = UP_DIV(out_channel, C8NUM);
-// #endif
 
 // init weight
 auto ret = MallocFilterMatrix(oc_block, oc_block_num);


+0 -2  mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc

@@ -146,7 +146,6 @@ int ArithmeticGradCPUKernel::InferShape() {
 
 dx1->set_shape(x1->shape());
 dx2->set_shape(x2->shape());
-// outTensor->set_shape(out_shape);
 dx1->set_data_type(dy->data_type());
 dx2->set_data_type(dy->data_type());
 return RET_OK;
@@ -261,7 +260,6 @@ int ArithmeticGradCPUKernel::ReSize() { return RET_OK; }
 
 int ArithmeticGradCPUKernel::Run() {
 auto dy = reinterpret_cast<float *>(inputs_[0]->Data());
-// auto input1_data1 = reinterpret_cast<float *>(inputs_[1]->Data());
 auto dx1 = reinterpret_cast<float *>(outputs_[0]->Data());
 auto dx2 = reinterpret_cast<float *>(outputs_[1]->Data());




+0 -1  mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc

@@ -77,7 +77,6 @@ int BiasGradCPUKernel::Run() {
 }
 auto in = reinterpret_cast<float *>(inputs_.at(0)->Data());
 auto out = reinterpret_cast<float *>(outputs_.at(0)->Data());
-// size_t data_size = inputs_.at(0)->ElementsNum();
 
 size_t nhw_size = 1;
 size_t channels = bias_param->in_shape0_[bias_param->ndim_ - 1]; // C in NHWC


+0 -5  mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc

@@ -48,7 +48,6 @@ int BNGradInputCPUKernel::Init() {
 return RET_ERROR;
 }
 auto *input_tensor = inputs_.at(0);
-// auto *weight_tensor = inputs_.at(1);
 auto *out_tensor = outputs_.at(0);
 auto in_shape = input_tensor->shape();
 out_tensor->set_shape(in_shape);
@@ -59,12 +58,10 @@ int BNGradInputCPUKernel::Init() {
 int BNGradInputCPUKernel::ReSize() { return RET_OK; }
 
 int BNGradInputCPUKernel::Run() {
-// std::cout << "run succ" << std::endl;
 auto *input_x = inputs_.at(0);
 auto *input_yt = inputs_.at(1);
 auto *input_scale = inputs_.at(2);
 auto *output_grad = outputs_.at(0);
-// Tensor *bias = input[5];
 auto bn_param = reinterpret_cast<bnParameter *>(opParameter);
 int batch = bn_param->batch;
 int channels = bn_param->channels;
@@ -100,8 +97,6 @@ kernel::LiteKernel *CpuBNGradInputFp32KernelCreator(const std::vector<lite::tens
 const mindspore::lite::PrimitiveC *primitive) {
 MS_ASSERT(opParameter != nullptr);
 MS_ASSERT(desc.type == schema::PrimitiveType_BNGradInput);
-// parameter->name = opDef.name()->str().data();
-// parameter->type = opDef.attr_type();
 auto *kernel = new (std::nothrow) BNGradInputCPUKernel(opParameter, inputs, outputs, ctx, primitive);
 if (kernel == nullptr) {
 MS_LOG(ERROR) << "new BNGradInputCPUKernel fail!";


+0 -107  mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc

@@ -28,90 +28,7 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_PoolingGrad;
 
 namespace mindspore::kernel {
-#if 0
-int PoolingGradCPUKernel::TfPadding(int input_w, int input_h, int &output_w, int &output_h) {
-PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *> (opParameter);
-
-auto stride_w = pool_param->stride_w_;
-auto stride_h = pool_param->stride_h_;
-auto window_w = pool_param->window_w_;
-auto window_h = pool_param->window_h_;
-auto pad_up = pool_param->pad_u_;
-auto pad_down = pool_param->pad_d_;
-auto pad_left = pool_param->pad_l_;
-auto pad_right = pool_param->pad_r_;
-if (pool_param->pad_mode_ == PADMODE_SAME) {
-output_w = ceil(input_w / stride_w);
-output_h = ceil(input_h / stride_h);
-} else {
-output_w = ceil((input_w + pad_left + pad_right - window_w + 1) / stride_w);
-output_h = ceil((input_h + pad_up + pad_down - window_h + 1) / stride_h);
-}
-return RET_OK;
-}
-
-int PoolingGradCPUKernel::CaffePadding(int input_w, int input_h, int &output_w, int &output_h) {
-PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *> (opParameter);
-
-auto round_mode = pool_param->round_mode_;
-auto stride_w = pool_param->stride_w_;
-auto stride_h = pool_param->stride_h_;
-auto window_w = pool_param->window_w_;
-auto window_h = pool_param->window_h_;
-auto pad_up = pool_param->pad_u_;
-auto pad_down = pool_param->pad_d_;
-auto pad_left = pool_param->pad_l_;
-auto pad_right = pool_param->pad_r_;
-if (round_mode == ROUNDMODE_FLOOR && false) {
-output_w = floor((input_w + pad_left + pad_right - window_w) / stride_w + 1);
-output_h = floor((input_h + pad_up + pad_down - window_h) / stride_h + 1);
-} else if (round_mode == ROUNDMODE_CEIL || true) {
-output_w = ceil((input_w + pad_left + pad_right - window_w) / stride_w + 1);
-output_h = ceil((input_h + pad_up + pad_down - window_h) / stride_h + 1);
-} else {
-MS_LOG(ERROR) << "round mode not support.";
-}
-
-if (pad_left > 0 || pad_up > 0) {
-if ((output_w - 1) * stride_w >= input_w + pad_left) {
---output_w;
-}
-if ((output_h - 1) * stride_h >= input_h + pad_up) {
---output_h;
-}
-}
-return RET_OK;
-}
-
-int PoolingGradCPUKernel::OnnxPadding(int input_w, int input_h, int &output_w, int &output_h) {
-PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *> (opParameter);
-
-auto round_mode = pool_param->round_mode_;
-auto stride_w = pool_param->stride_w_;
-auto stride_h = pool_param->stride_h_;
-auto window_w = pool_param->window_w_;
-auto window_h = pool_param->window_h_;
-auto pad_up = pool_param->pad_u_;
-auto pad_down = pool_param->pad_d_;
-auto pad_left = pool_param->pad_l_;
-auto pad_right = pool_param->pad_r_;
-if (round_mode == ROUNDMODE_FLOOR) {
-output_w = floor((input_w + pad_left + pad_right - window_w) / stride_w + 1);
-output_h = floor((input_h + pad_up + pad_down - window_h) / stride_h + 1);
-} else if (round_mode == ROUNDMODE_CEIL) {
-MS_LOG(ERROR) << "RoundMode_CEIL mode not support.";
-} else {
-MS_LOG(ERROR) << "OnnxPadding round mode not support.";
-}
-return RET_OK;
-}
-#endif
-
 int PoolingGradCPUKernel::Init() {
-// InferShape():
-// auto *in_tensor = reinterpret_cast<float *>(inputs_.at(0)->Data());
-// auto *x_tensor = reinterpret_cast<float *>(inputs_.at(1)->Data());
-
 PoolingParameter *pool_param = reinterpret_cast<PoolingParameter *>(opParameter);
 
 auto in_shape = inputs_.at(0)->shape();
@@ -127,30 +44,6 @@ int PoolingGradCPUKernel::Init() {
 auto *out_tensor = outputs_.front();
 auto out_shape = out_tensor->shape();
 
-#if 0
-int output_w = 0, output_h = 0;
-auto fmk_type = pool_param->fmk_type_;
-switch (fmk_type) {
-case lite::FmkType_TF:
-break;
-case lite::FmkType_CAFFE:
-CaffePadding(input_w, input_h, output_w, output_h);
-break;
-case lite::FmkType_ONNX:
-OnnxPadding(input_w, input_h, output_w, output_h);
-break;
-case lite::FmkType_MS:
-break;
-case lite::FmkType_TFLITE:
-TfPadding(input_w, input_h, output_w, output_h);
-break;
-default:
-MS_LOG(ERROR) << "Not support this framework.";
-}
-std::vector<int> out_shape{in_tensor->shape()};
-out_shape.at(1) = output_h;
-out_shape.at(2) = output_w;
-#endif
 out_tensor->set_shape(out_shape);
 out_tensor->set_data_type(inputs_.at(0)->data_type());
 return RET_OK;


+0 -3  mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h

@@ -35,9 +35,6 @@ class PoolingGradCPUKernel : public LiteKernel {
 : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
 ~PoolingGradCPUKernel() override = default;
 
-// int TfPadding(int input_w, int input_h, int &output_w, int &output_h);
-// int CaffePadding(int input_w, int input_h, int &output_w, int &output_h);
-// int OnnxPadding(int input_w, int input_h, int &output_w, int &output_h);
 
 int Init() override;
 int ReSize() override;


+0 -6  mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.cc

@@ -60,9 +60,6 @@ int BatchnormInt8CPUKernel::InitConstTensor() {
 return RET_ERROR;
 }
 // compute alpha, beta;
-// 0. tmp = (S4 * Sqrt(e + S3 * (q3 - Z3)));
-// 1. A = S1 / tmp;
-// 2. B = Z4 - (A1 * Z1) -((S2 * (q2 - Z2)) / tmp;
 auto eps = batchnorm_param_->epsilon_;
 auto zp_in = input->GetQuantParams().front().zeroPoint;
 auto zp_mean = mean->GetQuantParams().front().zeroPoint;
@@ -107,9 +104,6 @@ int BatchnormInt8CPUKernel::InitFusedConstTensor() {
 return RET_ERROR;
 }
 // compute alpha, beta;
-// 0. tmp = (S6 * Sqrt(e + S5 * (q5 - Z5)));
-// 1. A = S1 * S2 * (q2 - Z2) / tmp;
-// 2. B = Z6 - (A1 * Z1) -((S3 * (q3 - Z3)) / S6 - S2 * S4 * (q2 - Z4) * (q4 - z4) / tmp;
 auto eps = batchnorm_param_->epsilon_;
 auto zp_in = input->GetQuantParams().front().zeroPoint;
 auto zp_scale = scale->GetQuantParams().front().zeroPoint;


+0 -12  mindspore/lite/src/runtime/opencl/opencl_wrapper.cc

@@ -168,7 +168,6 @@ bool OpenCLWrapper::LoadLibraryFromPath(const std::string &library_path) {
 LOAD_OPENCL_FUNCTION_PTR(clCreateImage);
 #endif
 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
-// LOAD_OPENCL_FUNCTION_PTR(clGetKernelSubGroupInfoKHR);
 LOAD_OPENCL_FUNCTION_PTR(clCreateCommandQueueWithProperties);
 LOAD_OPENCL_FUNCTION_PTR(clGetExtensionFunctionAddress);
 LOAD_OPENCL_FUNCTION_PTR(clSVMAlloc);
@@ -614,17 +613,6 @@ cl_mem clCreateImage(cl_context context, cl_mem_flags flags, const cl_image_form
 #endif
 
 #if CL_HPP_TARGET_OPENCL_VERSION >= 200
-#if 0
-// clGetKernelSubGroupInfoKHR wrapper, use OpenCLWrapper function.
-cl_int clGetKernelSubGroupInfoKHR(cl_kernel kernel, cl_device_id device, cl_kernel_sub_group_info param_name,
-size_t input_value_size, const void *input_value, size_t param_value_size,
-void *param_value, size_t *param_value_size_ret) {
-auto func = mindspore::lite::opencl::OpenCLWrapper::GetInstance()->clGetKernelSubGroupInfoKHR;
-MS_ASSERT(func != nullptr);
-return func(kernel, device, param_name, input_value_size, input_value, param_value_size, param_value,
-param_value_size_ret);
-}
-#endif
 
 // clCreateCommandQueueWithProperties wrapper, use OpenCLWrapper function.
 cl_command_queue clCreateCommandQueueWithProperties(cl_context context, cl_device_id device,


+0 -3  mindspore/lite/src/runtime/thread_pool.c

@@ -88,7 +88,6 @@ static atomic_bool thread_pool_is_created[MAX_THREAD_POOL_NUM] = {ATOMIC_VAR_INI
 ThreadPool *GetInstance(int thread_pool_id) {
 if (thread_pool_id < 0 || thread_pool_id >= MAX_THREAD_POOL_NUM) {
 LOG_ERROR("invaid context id: %d", thread_pool_id);
-// DestroyThreadPool(thread_pool_id);
 return NULL;
 }
 return &thread_pool_list[thread_pool_id];
@@ -434,7 +433,6 @@ bool PushTaskToQueue(int thread_pool_id, int thread_id, Task *task) {
 thread->task_list[tail_index] = task;
 atomic_store_explicit(&thread->tail, next, memory_order_release);
 atomic_fetch_add_explicit(&thread->task_size, 1, memory_order_relaxed);
-// atomic_store_explicit(&thread->task_size, thread->task_size + 1, memory_order_relaxed);
 sem_post(&thread->sem);
 return true;
 }
@@ -552,7 +550,6 @@ void ThreadRun(Thread *thread) {
 }
 task->func(task->content, thread_id);
 atomic_fetch_sub_explicit(&thread->task_size, 1, memory_order_relaxed);
-// atomic_store_explicit(&thread->task_size, thread->task_size - 1, memory_order_relaxed);
 spin_count = 0;
 sem_trywait(&thread->sem);
 } else {
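A side note on the last two hunks: the deleted comments record an earlier variant that updated task_size with atomic_store_explicit(&thread->task_size, thread->task_size + 1, ...). That form is a non-atomic read-modify-write (the load and the store are separate operations, so two threads can lose an update), whereas the atomic_fetch_add_explicit / atomic_fetch_sub_explicit calls on the live lines perform the whole increment or decrement as a single atomic operation, which is why the commented alternatives are safe to delete.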


+0 -2  mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc

@@ -39,7 +39,5 @@ TEST_F(TestTfliteParserReverseSequence, AttrValue) {
 auto val = meta_graph->nodes.front()->primitive->value.AsReverseSequence();
 ASSERT_EQ(val->seqAxis, 1);
 ASSERT_EQ(val->seqAxis, 1);
-std::vector<int> seq_length = {7, 2, 3, 5};
-ASSERT_EQ(val->seqLengths, seq_length);
 }
 } // namespace mindspore

+0 -6  mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc

@@ -37,12 +37,6 @@ TEST_F(TestTfliteParserSparseToDense, OpType) {
 TEST_F(TestTfliteParserSparseToDense, AttrValue) {
 ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSparseToDense(), nullptr);
 auto val = meta_graph->nodes.front()->primitive->value.AsSparseToDense();
-std::vector<int> outputShape = {5, 5};
-ASSERT_EQ(val->outputShape, outputShape);
-std::vector<int> sparseValue = {1};
-ASSERT_EQ(val->sparseValue, sparseValue);
-std::vector<int> defaultValue = {0};
-ASSERT_EQ(val->defaultValue, defaultValue);
 ASSERT_EQ(val->validateIndices, false);
 }
 } // namespace mindspore

+0 -128  mindspore/lite/tools/anf_importer/anf_importer.cc

@@ -24,134 +24,6 @@
 #include "schema/inner/model_generated.h"
 namespace mindspore {
 namespace lite {
-#if 0
-PrimitivePtr SetConv2DAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto attrs = cNode->primitive()->value_as_Conv2D();
-PrimitivePtr prim;
-if (attrs->group() > 1) {
-prim = std::make_shared<Primitive>("DepthwiseConv2D");
-prim->set_instance_name("DepthwiseConv2D");
-} else {
-prim = std::make_shared<Primitive>("Conv2D");
-prim->set_instance_name("Conv2D");
-}
-
-prim->set_attr("group", MakeValue<int>(attrs->group()));
-prim->set_attr("format", MakeValue<int>(attrs->format()));
-prim->set_attr("pad_mode", MakeValue<int>(attrs->padMode()));
-std::vector<int> pad_list = {attrs->padUp(), attrs->padDown(), attrs->padLeft(), attrs->padRight()};
-prim->set_attr("pad_list", MakeValue<std::vector<int>>(pad_list));
-std::vector<int> dilate = {attrs->dilateH(), attrs->dilateW()};
-prim->set_attr("dilation", MakeValue<std::vector<int>>(dilate));
-std::vector<int> kernel_size = {attrs->kernelH(), attrs->kernelW()};
-prim->set_attr("kernel_size", MakeValue<std::vector<int>>(kernel_size));
-std::vector<int> stride = {1, 1, attrs->strideH(), attrs->strideW()};
-prim->set_attr("stride", MakeValue<std::vector<int>>(stride));
-prim->set_attr("out_channel", MakeValue<int>(attrs->channelOut()));
-prim->set_attr("group", MakeValue<int>(attrs->group()));
-return prim;
-}
-
-PrimitivePtr SetActivationAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto attrs = cNode->primitive()->value_as_Activation();
-PrimitivePtr prim;
-if (attrs->type() == schema::ActivationType_RELU) {
-prim = std::make_shared<Primitive>("ReLU");
-prim->set_instance_name("ReLU");
-}
-return prim;
-}
-
-PrimitivePtr SetPoolingAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto attrs = cNode->primitive()->value_as_Pooling();
-PrimitivePtr prim;
-if (attrs->poolingMode() == schema::PoolMode_MAX_POOLING) {
-prim = std::make_shared<Primitive>("MaxPool");
-prim->set_instance_name("MaxPool");
-} else if (attrs->poolingMode() == schema::PoolMode_MEAN_POOLING) {
-prim = std::make_shared<Primitive>("MeanPool");
-prim->set_instance_name("MeanPool");
-}
-
-prim->set_attr("format", MakeValue<int>(attrs->format()));
-prim->set_attr("pad_mode", MakeValue<int>(attrs->padMode()));
-prim->set_attr("ksize", MakeValue<std::vector<int>>(std::vector<int>({1, 1, attrs->windowH(), attrs->windowW()})));
-prim->set_attr("strides", MakeValue<std::vector<int>>(std::vector<int>({1, 1, attrs->strideH(), attrs->strideW()})));
-return prim;
-}
-
-PrimitivePtr SetFlattenAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto prim = std::make_shared<Primitive>("Flatten");
-prim->set_instance_name("Flatten");
-return prim;
-}
-
-PrimitivePtr SetMatmulAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto attrs = cNode->primitive()->value_as_MatMul();
-auto prim = std::make_shared<Primitive>("Matmul");
-prim->set_instance_name("Matmul");
-prim->set_attr("transpose_a", MakeValue<int>(attrs->transposeA()));
-prim->set_attr("transpose_b", MakeValue<int>(attrs->transposeB()));
-return prim;
-}
-
-PrimitivePtr SetMulAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-// auto attrs = nodedef->attr_as_Mul();
-auto prim = std::make_shared<Primitive>("Mul");
-prim->set_instance_name("Mul");
-return prim;
-}
-
-PrimitivePtr SetSigmoidAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto prim = std::make_shared<Primitive>("Sigmoid");
-prim->set_instance_name("Sigmoid");
-return prim;
-}
-
-PrimitivePtr SetReduceAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto prim = std::make_shared<Primitive>("ReduceMean");
-prim->set_instance_name("ReduceMean");
-return prim;
-}
-
-PrimitivePtr SetBatchNormAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto attrs = cNode->primitive_as_BatchNorm();
-auto prim = std::make_shared<Primitive>("BatchNorm");
-prim->set_attr("is_training", MakeValue<bool>(attrs->is_training()));
-prim->set_instance_name("BatchNorm");
-return prim;
-}
-
-PrimitivePtr SetBiasAddAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto prim = std::make_shared<Primitive>("BiasAdd");
-prim->set_instance_name("BiasAdd");
-return prim;
-}
-
-PrimitivePtr SetAddAttr(const schema::CNode *cNode) {
-MS_EXCEPTION_IF_NULL(cNode);
-auto prim = std::make_shared<Primitive>("Add");
-prim->set_instance_name("Add");
-return prim;
-}
-
-void MinnieBuildGraph::FbTest(const GraphDef *graph_def) {
-auto node_def = graph_def->subgraphs()->begin()->nodes()->GetAs<OpDef>(3);
-PrimitivePtr prim = ConverterOperatorAttr(node_def);
-if (prim->GetAttr("format")) MS_LOG(INFO) << "find format";
-if (prim->GetAttr("group")) MS_LOG(INFO) << "find group";
-}
-#endif
 
 int AnfImporter::Import(const schema::QuantType &quantType) {
 auto ret = ConverterConstTensor();


+0 -11  mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc

@@ -60,17 +60,6 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
 param_value->set_tensor_addr(tensor_data);
 param_value->set_tensor_size(size);
 }
-// if (!tensor->quantParams.empty()) {
-// std::unique_ptr<AnfQuantParam> quantParam = std::make_unique<AnfQuantParam>();
-// quantParam->scale = tensor->quantParams[0]->scale;
-// quantParam->zeroPoint = tensor->quantParams[0]->zeroPoint;
-// quantParam->min = tensor->quantParams[0]->min;
-// quantParam->max = tensor->quantParams[0]->max;
-// quantParam->narrowRange = tensor->quantParams[0]->narrowRange;
-// quantParam->numBits = tensor->quantParams[0]->numBits;
-// quantParam->inited = tensor->quantParams[0]->inited;
-// param_value->set_quant_param(quantParam);
-// }
 parameter->set_default_param(param_value);
 AddNode(i, parameter);
 }


+0 -33  mindspore/lite/tools/anf_importer/import_from_protobuf.h

@@ -48,37 +48,6 @@ class AnfImporterFromProtobuf : public AnfImporter {
 bool ParseModelConfigureInfo(const onnx::ModelProto &model_proto);
 bool BuildFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto,
 const schema::QuantType &quantType);
-#if 0
-bool ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph,
-const onnx::GraphProto &importProto);
-bool ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph,
-const onnx::GraphProto &importProto);
-bool BuildParameterForFuncGraph(const ParameterPtr &node,
-const onnx::ValueInfoProto &value_proto);
-CNodePtr BuildCNodeForFuncGraph(const FuncGraphPtr &outputFuncGraph,
-const onnx::NodeProto &node_proto);
-bool BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGraph,
-const onnx::GraphProto &importProto,
-const CNodePtr &cnode_ptr);
-bool GetAttrValueForCNode(const PrimitivePtr &prim,
-const onnx::AttributeProto &attr_proto);
-bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim,
-const std::string &attr_name,
-const onnx::TensorProto &attr_tensor);
-ValuePtr ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor);
-bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim,
-const std::string &attr_name,
-const onnx::TensorProto &attr_tensor);
-bool BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto);
-bool ObtainValueNodeInTensorForm(const std::string &value_node_name,
-const onnx::TensorProto &attr_tensor);
-bool GetAttrValueForValueNode(const std::string &value_node_name,
-const onnx::AttributeProto &attr_tensor);
-bool ObtainValueNodeInTypeForm(const std::string &value_node_name,
-const onnx::TensorProto &attr_tensor);
-std::unordered_map<std::string, abstract::AbstractTensorPtr>
-GetAbstractForCNode(const onnx::AttributeProto &attr_proto);
-#else
 bool ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto);
 bool ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto,
 const schema::QuantType &quantType);
@@ -103,8 +72,6 @@ class AnfImporterFromProtobuf : public AnfImporter {
 bool ObtainValueNodeInTypeForm(const string &value_node_name, const onnx::TensorProto &attr_tensor);
 abstract::AbstractTensorPtr GetAbstractForCNode(const onnx::AttributeProto &attr_proto);
 
-#endif
-
 private:
 std::string producer_name_;
 int model_version_{};


+2 -5  mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc

@@ -54,16 +54,13 @@ STATUS TfliteReverseSequenceParser::Parse(const std::unique_ptr<tflite::Operator
 attr->seqAxis = tflite_attr->seq_dim;
 attr->batchAxis = tflite_attr->batch_dim;
 
-if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->seqLengths)) {
-MS_LOG(ERROR) << "get reverse_sequence -> seqLengths failed";
-return RET_ERROR;
-}
-
 op->primitive->value.type = schema::PrimitiveType_ReverseSequence;
 op->primitive->value.value = attr.release();
 
 AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
 tflite_op->inputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
+AddOpInput(op, tensors_id, tensors_format, tensors_id_map,
+tflite_op->inputs[1], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
 AddOpOutput(op, tensors_id, tensors_format, tensors_id_map,
 tflite_op->outputs[0], tensors_id->size(), tflite_tensors.size(), schema::Format_NHWC);
 return RET_OK;
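This hunk is the fix named in the PR title: instead of materializing seq_lengths into the schema attribute via GetTfliteData, which only works when the tensor holds constant data, the parser now registers tflite_op->inputs[1] as a second op input, so seq_lengths reaches the runtime as an ordinary tensor. A hypothetical kernel-side read, for illustration only (the accessors follow the inputs_.at(i)->Data() pattern visible elsewhere in this diff; none of these lines are in the patch):

    // hypothetical: fetch seq_lengths from the op's second input tensor
    auto *seq_tensor = inputs_.at(1);                                 // wired up by the parser above
    auto *seq_lengths = reinterpret_cast<int *>(seq_tensor->Data());  // one length per batch element
    int batch = seq_tensor->ElementsNum();
    for (int b = 0; b < batch; ++b) {
      // reverse the first seq_lengths[b] steps of slice b along seqAxis
    }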


+0 -14  mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc

@@ -47,20 +47,6 @@ STATUS TfliteSparseToDenseParser::Parse(const std::unique_ptr<tflite::OperatorT>
 }
 
 attr->validateIndices = false;
-
-if (GetTfliteData(tflite_op->inputs[1], tflite_tensors, tflite_model_buffer, attr->outputShape)) {
-MS_LOG(ERROR) << "get sparseToDense -> outputShape failed";
-return RET_ERROR;
-}
-if (GetTfliteData(tflite_op->inputs[2], tflite_tensors, tflite_model_buffer, attr->sparseValue)) {
-MS_LOG(ERROR) << "get sparseToDense -> sparseValue failed";
-return RET_ERROR;
-}
-if (GetTfliteData(tflite_op->inputs[3], tflite_tensors, tflite_model_buffer, attr->defaultValue)) {
-MS_LOG(ERROR) << "get sparseToDense -> defaultValue failed";
-return RET_ERROR;
-}
-
 op->primitive->value.type = schema::PrimitiveType_SparseToDense;
 op->primitive->value.value = attr.release();
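The SparseToDense parser mirrors the ReverseSequence change: output_shape (inputs[1]), sparse_values (inputs[2]) and default_value (inputs[3]) are no longer copied into schema attributes at convert time. With those fields deleted from ops.fbs, only the validateIndices flag survives in the primitive, and the three tensors remain in the graph for the runtime kernel to read, so the parse tail reduces to the three surviving lines shown above.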



