
!21478 [MSLITE] fp32 bug fix

Merge pull request !21478 from ling/bug
tags/v1.5.0-rc1
i-robot (Gitee) committed 4 years ago
commit c0c1d0722c
5 changed files with 14 additions and 6 deletions
1. +5 -2  mindspore/lite/micro/coder/graph.cc
2. +4 -0  mindspore/lite/micro/coder/train.cc
3. +1 -0  mindspore/lite/micro/coder/wrapper/int8/conv1x1_init_int8_wrapper.c
4. +2 -2  mindspore/lite/src/runtime/kernel/arm/fp32/bias_fp32.cc
5. +2 -2  mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc

mindspore/lite/micro/coder/graph.cc  (+5 -2)

@@ -30,8 +30,11 @@
 
 namespace mindspore::lite::micro {
 CoderGraph::~CoderGraph() {
-  model_->Free();
-  delete model_;
+  if (model_ != nullptr) {
+    model_->Free();
+    delete model_;
+    model_ = nullptr;
+  }
   for (auto &tensor : all_tensors_) {
     delete tensor;
   }
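
The guard above avoids calling Free()/delete on a null model_ and nulls the pointer after deletion, so the destructor stays safe even if cleanup runs twice. A minimal standalone sketch of the same pattern (Model here is a stand-in type, not the real mindspore::lite::Model):

    struct Model {
      void Free() {}  // would release buffers the model owns
    };

    class CoderGraph {
     public:
      ~CoderGraph() {
        // Free only if a model was actually attached; nulling the pointer
        // afterwards keeps any repeated cleanup path from double-deleting.
        if (model_ != nullptr) {
          model_->Free();
          delete model_;
          model_ = nullptr;
        }
      }

     private:
      Model *model_ = nullptr;  // may legitimately stay null
    };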


mindspore/lite/micro/coder/train.cc  (+4 -0)

@@ -55,6 +55,10 @@ std::set<OperatorCoder *> FindInferenceOpcoders(OperatorCoder *edge) {
 }
 
 int Train::TransformGraphForTrain(CoderContext *context, const std::vector<std::unique_ptr<OperatorCoder>> &op_coders) {
+  if (context == nullptr) {
+    MS_LOG(INFO) << "input context invalid";
+    return RET_ERROR;
+  }
   const std::array<int, 6> loss_types = {schema::PrimitiveType_SparseSoftmaxCrossEntropyWithLogits,
                                          schema::PrimitiveType_BinaryCrossEntropy,
                                          schema::PrimitiveType_SmoothL1Loss,
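
This adds a fail-fast null check before the context is dereferenced. A minimal sketch of the shape of the guard, with local stand-ins for the real CoderContext type and the MS_LOG/RET_ERROR macros:

    #include <iostream>

    constexpr int RET_OK = 0;
    constexpr int RET_ERROR = 1;  // stand-in for lite's error code

    struct CoderContext {};       // stand-in for the real coder context

    int TransformGraphForTrain(CoderContext *context) {
      // Reject a null context before any member access, as in the patch.
      if (context == nullptr) {
        std::cerr << "input context invalid\n";  // stands in for MS_LOG(INFO)
        return RET_ERROR;
      }
      // ... the graph transformation itself would follow ...
      return RET_OK;
    }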


mindspore/lite/micro/coder/wrapper/int8/conv1x1_init_int8_wrapper.c  (+1 -0)

@@ -65,6 +65,7 @@ int Conv1x1Init(int8_t *src_weight, int32_t *src_bias, int32_t *filter_zps, int3
   int32_t *bias_data_ = (int32_t *)malloc(size * sizeof(int32_t));
   if (bias_data_ == NULL) {
     free(packed_weight_);
+    packed_weight_ = NULL;
     return NNACL_ERR;
   }
   memset(bias_data_, 0, size * sizeof(int32_t));
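
The added line nulls packed_weight_ after freeing it, so a later error path (or the caller) cannot free the same buffer twice. The idiom in isolation, with illustrative names and signature rather than the wrapper's real ones:

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    int AllocWeightAndBias(size_t size, int8_t **weight_out, int32_t **bias_out) {
      int8_t *packed_weight = static_cast<int8_t *>(std::malloc(size));
      if (packed_weight == nullptr) {
        return -1;
      }
      int32_t *bias_data = static_cast<int32_t *>(std::malloc(size * sizeof(int32_t)));
      if (bias_data == nullptr) {
        std::free(packed_weight);
        packed_weight = nullptr;  // the patched line: no dangling pointer left behind
        return -1;
      }
      std::memset(bias_data, 0, size * sizeof(int32_t));
      *weight_out = packed_weight;
      *bias_out = bias_data;
      return 0;
    }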


mindspore/lite/src/runtime/kernel/arm/fp32/bias_fp32.cc  (+2 -2)

@@ -47,7 +47,7 @@ int BiasCPUKernel::Run() {
   auto in = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
   auto bias = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());
   auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
-  size_t data_size = in_tensors_.at(0)->ElementsNum();
+  size_t data_size = static_cast<size_t>(in_tensors_.at(0)->ElementsNum());
   MS_ASSERT(ms_context_->allocator != nullptr);
   float *tile_in = reinterpret_cast<float *>(ms_context_->allocator->Malloc(data_size * sizeof(float)));
   float *tile_bias = reinterpret_cast<float *>(ms_context_->allocator->Malloc(data_size * sizeof(float)));
@@ -57,7 +57,7 @@ int BiasCPUKernel::Run() {
     ms_context_->allocator->Free(tile_bias);
     return RET_ERROR;
   }
-  auto ret = BroadcastAdd(in, bias, tile_in, tile_bias, out, data_size, bias_param_);
+  auto ret = BroadcastAdd(in, bias, tile_in, tile_bias, out, static_cast<int>(data_size), bias_param_);
   ms_context_->allocator->Free(tile_in);
   ms_context_->allocator->Free(tile_bias);
   return ret;
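
Both hunks make a previously implicit integer conversion explicit: ElementsNum() is signed while the buffer math wants size_t, and BroadcastAdd() takes an int. A small sketch of the before/after (ElementsNum here is a hypothetical stand-in for the tensor API):

    #include <cstddef>

    int ElementsNum() { return 1024; }  // stand-in: element counts are signed

    void SizeCastDemo() {
      // Implicit `size_t n = ElementsNum();` compiles but hides the sign
      // change and trips conversion warnings; the explicit form makes both
      // directions of the conversion visible at the call site.
      size_t data_size = static_cast<size_t>(ElementsNum());
      int broadcast_len = static_cast<int>(data_size);  // what BroadcastAdd expects
      (void)broadcast_len;
    }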


mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc  (+2 -2)

@@ -65,7 +65,7 @@ int GatherCPUKernel::DoGather(int task_id) {
   int8_t *int8_in = reinterpret_cast<int8_t *>(input_tensor->data_c());
   int8_t *int8_out = reinterpret_cast<int8_t *>(out_tensor->data_c());
 
-  int data_size = lite::DataTypeSize(input_tensor->data_type());
+  int data_size = static_cast<int>(lite::DataTypeSize(input_tensor->data_type()));
   int8_in += thread_stride * limit * inner_size * data_size;
   int8_out += thread_stride * indices_element_size * inner_size * data_size;
 
@@ -121,7 +121,7 @@ int GatherCPUKernel::AssignIndicesData(bool isIndicesInt32, int indices_num, lit
       }
     } else {
       for (int i = 0; i < indices_num; i++) {
-        indices_data_[i] = reinterpret_cast<float *>(indices_tensor->MutableData())[i];
+        indices_data_[i] = static_cast<int>(reinterpret_cast<float *>(indices_tensor->MutableData())[i]);
      }
    }
  } else {
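
Here too the fix only makes existing conversions explicit: DataTypeSize() returns a size_t that feeds int pointer arithmetic, and float indices are truncated to int. A standalone sketch of the index conversion (names are illustrative, not the kernel's):

    #include <vector>

    std::vector<int> ConvertFloatIndices(const float *src, int indices_num) {
      std::vector<int> indices(indices_num);
      for (int i = 0; i < indices_num; i++) {
        // Truncation toward zero is intentional; the explicit cast makes
        // that visible and silences narrowing warnings.
        indices[i] = static_cast<int>(src[i]);
      }
      return indices;
    }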

