diff --git a/mindspore/lite/nnacl/fp32/resize.c b/mindspore/lite/nnacl/fp32/resize.c
index 0953631347..39465783eb 100644
--- a/mindspore/lite/nnacl/fp32/resize.c
+++ b/mindspore/lite/nnacl/fp32/resize.c
@@ -91,13 +91,13 @@ int ResizeBilinear(const float *input_data, float *output_data, const int *input
       int y_bottom = y_bottoms[h];
       int y_top = y_tops[h];
       float y_bottom_weight = y_bottom_weights[h];
-      float y_top_weight = 1.0f - y_bottom_weight;
+      const float y_top_weight = 1.0f - y_bottom_weight;
       for (w = 0; w < new_width; w++) {
         int x_left = x_lefts[w];
         int x_right = x_rights[w];
         float x_left_weight = x_left_weights[w];
-        float x_right_weight = 1.0f - x_left_weight;
+        const float x_right_weight = 1.0f - x_left_weight;
         float top_left_weight = y_top_weight * x_left_weight;
         float top_right_weight = y_top_weight * x_right_weight;
         float bottom_left_weight = y_bottom_weight * x_left_weight;
diff --git a/mindspore/lite/nnacl/fp32/roi_pooling.c b/mindspore/lite/nnacl/fp32/roi_pooling.c
index 1f4f1b9839..26f5ff8481 100644
--- a/mindspore/lite/nnacl/fp32/roi_pooling.c
+++ b/mindspore/lite/nnacl/fp32/roi_pooling.c
@@ -41,6 +41,7 @@ int ROIPooling(float *in_ptr, float *out_ptr, float *roi, int tid, ROIPoolingPar
   for (int i = roi_st; i < roi_end; ++i) {
     int roi_batch_ind = (int)roi[roi_ind_st];  // batch_index
     if (roi_batch_ind >= batch_size) {
+      free(max_c);
       return NNACL_ERRCODE_INDEX_OUT_OF_RANGE;
     }
     int roi_start_h = (int)roundf(roi[roi_ind_st + 1] * scale);  // top-left x1
diff --git a/mindspore/lite/nnacl/int8/leaky_relu_int8.c b/mindspore/lite/nnacl/int8/leaky_relu_int8.c
index 540a7f898a..7498b8120b 100644
--- a/mindspore/lite/nnacl/int8/leaky_relu_int8.c
+++ b/mindspore/lite/nnacl/int8/leaky_relu_int8.c
@@ -15,17 +15,21 @@
  */

 #include "nnacl/int8/leaky_relu_int8.h"
+#include "nnacl/errorcode.h"

-void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
+int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_prelu_parm, int task_id) {
   if (quant_prelu_parm == NULL) {
-    return;
+    return NNACL_NULL_PTR;
   }
   float output_scale = quant_prelu_parm->quant_arg.out_args_.scale_;
   int output_zp = quant_prelu_parm->quant_arg.out_args_.zp_;
   const float output_inverse_scale = 1.f / output_scale;
   int output_dim = quant_prelu_parm->input_dim_;

-  QuantArg *input_quant = NULL;
+  QuantArg *input_quant = malloc(sizeof(QuantArg) * output_dim);
+  if (input_quant == NULL) {
+    return NNACL_NULL_PTR;
+  }
   for (int i = 0; i < output_dim; i++) {
     input_quant[i].scale_ = quant_prelu_parm->quant_arg.in_args_.scale_;
     input_quant[i].zp_ = quant_prelu_parm->quant_arg.in_args_.zp_;
@@ -56,4 +60,6 @@ void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant
       }
     }
   }
+  free(input_quant);
+  return NNACL_OK;
 }
diff --git a/mindspore/lite/nnacl/int8/leaky_relu_int8.h b/mindspore/lite/nnacl/int8/leaky_relu_int8.h
index 89208bb655..afe8c1934f 100644
--- a/mindspore/lite/nnacl/int8/leaky_relu_int8.h
+++ b/mindspore/lite/nnacl/int8/leaky_relu_int8.h
@@ -23,7 +23,7 @@
 #ifdef __cplusplus
 extern "C" {
 #endif
-void DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, int task_id);
+int DoLeakReluInt8(int8_t *inputs, int8_t *output_ptr, LeakyReluQuantArg *quant_Prelu_parm, int task_id);
 #ifdef __cplusplus
 }
 #endif
diff --git a/mindspore/lite/nnacl/int8/resize.c b/mindspore/lite/nnacl/int8/resize.c
index 9c7bcdee1b..2f10130738 100644
--- a/mindspore/lite/nnacl/int8/resize.c
+++ b/mindspore/lite/nnacl/int8/resize.c
@@ -101,8 +101,14 @@ int ResizeBilinearInt8WithFloatWeight(const int8_t *input_data, int8_t *output_d
   int32_t new_height = output_shape[1];
   int32_t new_width = output_shape[2];
   float height_scale, width_scale;
-  ComputeScaleFloat(in_h, new_height, align_corners, &height_scale);
-  ComputeScaleFloat(in_w, new_width, align_corners, &width_scale);
+  int ret = ComputeScaleFloat(in_h, new_height, align_corners, &height_scale);
+  if (ret != NNACL_OK) {
+    return ret;
+  }
+  ret = ComputeScaleFloat(in_w, new_width, align_corners, &width_scale);
+  if (ret != NNACL_OK) {
+    return ret;
+  }

   int n, h, w, c;
   for (n = 0; n < in_n; n++) {
@@ -189,11 +195,15 @@ void ComputeInterpolationArgs(const int32_t pos, const int32_t scale, const int3
   *scaled_high_weight = *scaled_pos - (1 << 10) * (*low);
 }

-void ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale) {
+int ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale) {
+  if (out_value == 0) {
+    return NNACL_ERRCODE_DIVISOR_ZERO;
+  }
   *scale = (float)in_value / out_value;
   if (align_corners && out_value > 1) {
     *scale = (float)(in_value - 1) / (out_value - 1);
   }
+  return NNACL_OK;
 }

 void ComputeInterpolationArgsFloatWeight(const int32_t pos, const float scale, const int32_t size, float *actual_pos,
diff --git a/mindspore/lite/nnacl/int8/resize.h b/mindspore/lite/nnacl/int8/resize.h
index 67a56da8c0..d13c78c90c 100644
--- a/mindspore/lite/nnacl/int8/resize.h
+++ b/mindspore/lite/nnacl/int8/resize.h
@@ -40,7 +40,7 @@ void ComputeScale(const int32_t in_value, const int32_t out_value, const bool al
 void ComputeInterpolationArgs(const int32_t pos, const int32_t scale, const int32_t size, int32_t *scaled_pos,
                               int32_t *low, int32_t *scaled_low_weight, int32_t *high, int32_t *scaled_high_weight);

-void ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale);
+int ComputeScaleFloat(const int32_t in_value, const int32_t out_value, const bool align_corners, float *scale);

 void ComputeInterpolationArgsFloatWeight(const int32_t pos, const float scale, const int32_t size, float *actual_pos,
                                          int32_t *low, float *low_weight, int32_t *high, float *high_weight);
diff --git a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc
index 84addcaa2f..909e05620b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc
@@ -352,6 +352,7 @@ int ConvolutionBaseCPUKernel::RestoreFilter(lite::tensor::Tensor *input_tensor)
   size_t channels = static_cast<size_t>(input_tensor->Batch());
   if (input_tensor->GetQuantParams().size() != channels) {
     MS_LOG(ERROR) << "Quant param not equal channel num " << input_tensor->GetQuantParams().size() << channels;
+    free(dequant_data);
     return RET_ERROR;
   }
   size_t per_channel_size = input_tensor->DataSize() / channels;
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
index af6b6fd94e..d38e16c517 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc
@@ -20,6 +20,7 @@
 #include "src/runtime/runtime_api.h"
 #include "src/kernel_registry.h"
 #include "include/errorcode.h"
+#include "nnacl/errorcode.h"

 using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
@@ -105,7 +106,11 @@ int LeakyReluInt8CPUKernel::DoExecute(int task_id) {
   auto out_tensor = out_tensors_.at(kOutputIndex);
   int8_t *input_data = reinterpret_cast<int8_t *>(input_tensor->Data());
   int8_t *output_data = reinterpret_cast<int8_t *>(out_tensor->Data());
-  DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, task_id);
+  auto ret = DoLeakReluInt8(input_data, output_data, &quant_prelu_parm_, task_id);
+  if (ret != NNACL_OK) {
+    MS_LOG(ERROR) << "DoLeakReluInt8 failed";
+    return RET_ERROR;
+  }
   return RET_OK;
 }

diff --git a/mindspore/lite/src/runtime/thread_pool.c b/mindspore/lite/src/runtime/thread_pool.c
index ed4656b469..5f134ff702 100644
--- a/mindspore/lite/src/runtime/thread_pool.c
+++ b/mindspore/lite/src/runtime/thread_pool.c
@@ -500,12 +500,11 @@ int DistributeTask(int thread_pool_id, Task *task, int task_num) {
     } while (!k_success_flag);
   }
   // master thread
-  task->func(task->content, size - 1);
   if (task->func == NULL) {
     LOG_ERROR("task->func is nullptr");
     return RET_TP_ERROR;
   }
-
+  task->func(task->content, size - 1);
   // wait
   WaitAllThread(thread_pool_id);
   return RET_TP_OK;
@@ -547,11 +546,11 @@ void ThreadRun(Thread *thread) {
   while (thread_pool->is_alive) {
     while (thread->activate) {
       if (PopTaskFromQueue(thread, &task)) {
-        task->func(task->content, thread_id);
         if (task->func == NULL) {
          LOG_ERROR("task->func is nullptr");
          return;
         }
+        task->func(task->content, thread_id);
         atomic_fetch_sub_explicit(&thread->task_size, 1, memory_order_relaxed);
         // atomic_store_explicit(&thread->task_size, thread->task_size - 1, memory_order_relaxed);
         spin_count = 0;
diff --git a/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.cpp b/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.cpp
index acf087a07d..e5ddf6b31b 100644
--- a/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.cpp
+++ b/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.cpp
@@ -54,6 +54,8 @@ int MSNetWork::ReleaseNets(void) {
   return 0;
 }

+const int MSNetWork::RET_CATEGORY_SUM = 601;
+
 const char *MSNetWork::labels_name_map[MSNetWork::RET_CATEGORY_SUM] = {
   {"Tortoise"}, {"Container"}, {"Magpie"}, {"Seaturtle"}, {"Football"}, {"Ambulance"}, {"Ladder"},
   {"Toothbrush"}, {"Syringe"}, {"Sink"}, {"Toy"}, {"Organ(MusicalInstrument) "}, {"Cassettedeck"},
diff --git a/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h b/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h
index 27da5f5dfa..d1c3dbace0 100644
--- a/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h
+++ b/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h
@@ -32,7 +32,6 @@
 #include
 #include

-
 struct ImgDims {
   int channel = 0;
   int width = 0;
@@ -43,8 +42,6 @@ struct ImgDims {
   std::shared_ptr sess = nullptr;
 };*/

-
-
 class MSNetWork {
  public:
  MSNetWork();
@@ -55,10 +52,10 @@ class MSNetWork {

   int ReleaseNets(void);

+ private:
   mindspore::session::LiteSession *session;
   mindspore::lite::Model *model;
-  static const int RET_CATEGORY_SUM = 601;
+  static const int RET_CATEGORY_SUM;
   static const char *labels_name_map[RET_CATEGORY_SUM];
 };
-
 #endif
diff --git a/model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp b/model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp
index fdf2e1c827..f58394c0bd 100644
--- a/model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp
+++ b/model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp
@@ -76,10 +76,10 @@ cv::Mat PreProcessImageData(cv::Mat input) {

   imgFloatTmp.convertTo(imgResized256, CV_32FC3, normalizMin / normalizMax);

-  int offsetX = 16;
-  int offsetY = 16;
-  int cropWidth = 224;
-  int cropHeight = 224;
+  const int offsetX = 16;
+  const int offsetY = 16;
+  const int cropWidth = 224;
+  const int cropHeight = 224;

   // Standardization processing.
   float meanR = 0.485;
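
Most of the nnacl changes above apply one pattern: a kernel that allocates scratch memory or can see invalid input now returns a status code instead of void, releases its buffer on every exit path, and the caller checks that code before continuing. Below is a minimal, self-contained C sketch of the same pattern; it is not the actual nnacl API. The ScaleValues function, its STATUS_* codes, and the "negative value is an error" rule are illustrative stand-ins (nnacl itself uses the codes from nnacl/errorcode.h).

/*
 * Sketch only: allocate scratch memory, validate inputs, free the buffer on
 * every exit path, and report a status code the caller must check.
 */
#include <stdio.h>
#include <stdlib.h>

enum { STATUS_OK = 0, STATUS_NULL_PTR = 1, STATUS_DIVISOR_ZERO = 2, STATUS_BAD_INPUT = 3 };

/* Scales len non-negative inputs by len / out_len into output. */
static int ScaleValues(const float *input, float *output, int len, int out_len) {
  if (input == NULL || output == NULL) {
    return STATUS_NULL_PTR;
  }
  if (out_len == 0) {
    return STATUS_DIVISOR_ZERO;  /* guard the division up front, as ComputeScaleFloat now does */
  }
  float *scratch = malloc(sizeof(float) * (size_t)len);
  if (scratch == NULL) {
    return STATUS_NULL_PTR;      /* the allocation itself can fail, as in DoLeakReluInt8 */
  }
  const float scale = (float)len / out_len;
  for (int i = 0; i < len; i++) {
    if (input[i] < 0.0f) {
      /* Error found after allocation: free before returning, mirroring the
       * free() calls added in ROIPooling and RestoreFilter. */
      free(scratch);
      return STATUS_BAD_INPUT;
    }
    scratch[i] = input[i] * scale;
  }
  for (int i = 0; i < len; i++) {
    output[i] = scratch[i];
  }
  free(scratch);  /* the success path releases the buffer too */
  return STATUS_OK;
}

int main(void) {
  const float in[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  float out[4] = {0.0f};
  int ret = ScaleValues(in, out, 4, 4);
  if (ret != STATUS_OK) {  /* callers check the code instead of assuming success */
    fprintf(stderr, "ScaleValues failed: %d\n", ret);
    return 1;
  }
  printf("%.1f %.1f %.1f %.1f\n", out[0], out[1], out[2], out[3]);
  return 0;
}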