diff --git a/mindspore/lite/nnacl/common_func.c b/mindspore/lite/nnacl/common_func.c index ec6e632a89..10edd92619 100644 --- a/mindspore/lite/nnacl/common_func.c +++ b/mindspore/lite/nnacl/common_func.c @@ -15,7 +15,6 @@ */ #include "nnacl/common_func.h" -#include "nnacl/quantization/fixed_point.h" int offset(const int *shape, const int dim0, const int dim1, const int dim2, const int dim3) { return ((dim0 * shape[1] + dim1) * shape[2] + dim2) * shape[3] + dim3; diff --git a/mindspore/lite/nnacl/fp32/conv.c b/mindspore/lite/nnacl/fp32/conv.c index 4ad11f31a4..570aa39ceb 100644 --- a/mindspore/lite/nnacl/fp32/conv.c +++ b/mindspore/lite/nnacl/fp32/conv.c @@ -21,7 +21,7 @@ #include "nnacl/fp32/matmul.h" // fp32 conv common -void ConvFp32(float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, +void ConvFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, float *col_major_input, float *output_data, int task_id, ConvParameter *conv_param) { int kernel_h = conv_param->kernel_h_; int kernel_w = conv_param->kernel_w_; @@ -70,7 +70,7 @@ void ConvFp32(float *input_data, float *packed_input, const float *packed_weight } // fp32 conv winograd -void ConvWinogardFp32(float *input_data, const float *trans_weight, const float *bias_data, float *output_data, +void ConvWinogardFp32(const float *input_data, const float *trans_weight, const float *bias_data, float *output_data, TmpBufferAddress *buffer_list, int task_id, ConvParameter *conv_param, InputTransFunc in_func, OutputTransFunc out_func) { int thread_num = conv_param->thread_num_; diff --git a/mindspore/lite/nnacl/fp32/conv.h b/mindspore/lite/nnacl/fp32/conv.h index a5e52fdf94..4e9ed71e63 100644 --- a/mindspore/lite/nnacl/fp32/conv.h +++ b/mindspore/lite/nnacl/fp32/conv.h @@ -34,11 +34,11 @@ extern "C" { #endif // fp32 convolution common (im2col+gemm) -void ConvFp32(float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, +void ConvFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, float *col_major_input, float *output_data, int task_id, ConvParameter *conv_param); // fp32 convolution winograd -void ConvWinogardFp32(float *input_data, const float *trans_weight, const float *bias_data, float *output_data, +void ConvWinogardFp32(const float *input_data, const float *trans_weight, const float *bias_data, float *output_data, TmpBufferAddress *buffer_list, int task_id, ConvParameter *conv_param, InputTransFunc in_func, OutputTransFunc out_func); #ifdef __cplusplus diff --git a/mindspore/lite/nnacl/fp32/deconv_winograd.c b/mindspore/lite/nnacl/fp32/deconv_winograd.c index 359a135ed3..e6fd751af8 100644 --- a/mindspore/lite/nnacl/fp32/deconv_winograd.c +++ b/mindspore/lite/nnacl/fp32/deconv_winograd.c @@ -50,6 +50,7 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame DECONV_WINOGRAD_DEFAULT_UNIT, unit->h_size_); if (ret != NNACL_OK) { free(current_unit_weight); + current_unit_weight = NULL; return NNACL_ERRCODE_WINOGRAD_GENERATOR_ERROR; } @@ -58,6 +59,7 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame if (unit->winograd_.AT_ == NULL) { if (current_unit_weight != NULL) { free(current_unit_weight); + current_unit_weight = NULL; } return NNACL_NULL_PTR; } @@ -68,9 +70,11 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame if (unit->winograd_.BT_ == NULL) { if (current_unit_weight != NULL) { 
free(current_unit_weight); + current_unit_weight = NULL; } if (unit->winograd_.AT_ != NULL) { free(unit->winograd_.AT_); + unit->winograd_.AT_ = NULL; } return NNACL_NULL_PTR; } @@ -82,12 +86,15 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame if (winograd_unit_weight == NULL) { if (current_unit_weight != NULL) { free(current_unit_weight); + current_unit_weight = NULL; } if (unit->winograd_.AT_ != NULL) { free(unit->winograd_.AT_); + unit->winograd_.AT_ = NULL; } if (unit->winograd_.BT_ != NULL) { free(unit->winograd_.BT_); + unit->winograd_.BT_ = NULL; } return NNACL_NULL_PTR; } @@ -97,6 +104,7 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame /* reset weight data & info */ tmp_kernel_plane = unit->winograd_.kh_ * unit->winograd_.kw_; free(current_unit_weight); + current_unit_weight = NULL; current_unit_weight = winograd_unit_weight; winograd_unit_weight = NULL; } @@ -119,6 +127,7 @@ int PackDeConvWgDataFp32(float *nhwc_weight, DeConvComputeUnit *unit, ConvParame if (current_unit_weight != NULL) { free(current_unit_weight); + current_unit_weight = NULL; } return NNACL_OK; } @@ -332,7 +341,7 @@ void DeConvWgMerge(const float *src, float *dst, size_t src_stride, size_t dst_s } void _deConvWinograd(const float *tile_in, float *tile_out, float *weight_buf, float *tmp_buf, float *at_buf, - float *a_mid_buf, float *trans_a_buf, bool *transfered, float *bt_buf, float *b_tmp_buf, + float *a_mid_buf, float *trans_a_buf, bool *transfered, const float *bt_buf, float *b_tmp_buf, int unit_size, int w_start, int h_start, ConvParameter *conv_param, DeConvParam *deconv_param) { int winograd_plane = unit_size * unit_size; if (!transfered[unit_size]) { diff --git a/mindspore/lite/nnacl/fp32/elu.c b/mindspore/lite/nnacl/fp32/elu.c index b31940a6c7..01d5a2f848 100644 --- a/mindspore/lite/nnacl/fp32/elu.c +++ b/mindspore/lite/nnacl/fp32/elu.c @@ -22,7 +22,7 @@ void Calculate_Data(const float *input_data, float *output_data, int num, EluPar output_data[num] = input_data[num] < 0 ? 
parameter->alpha_ * expm1(input_data[num]) : input_data[num]; } -int Elu(float *input_data, float *output_data, EluParameter *parameter, int task_id) { +int Elu(const float *input_data, float *output_data, EluParameter *parameter, int task_id) { for (size_t i = task_id; i < parameter->in_size_; i += parameter->thread_num_) { Calculate_Data(input_data, output_data, i, parameter); } diff --git a/mindspore/lite/nnacl/fp32/elu.h b/mindspore/lite/nnacl/fp32/elu.h index 623a40120e..d60826135f 100644 --- a/mindspore/lite/nnacl/fp32/elu.h +++ b/mindspore/lite/nnacl/fp32/elu.h @@ -29,7 +29,7 @@ typedef struct EluParameter { #ifdef __cplusplus extern "C" { #endif -int Elu(float *input_data, float *output_data, EluParameter *parameter, int task_id); +int Elu(const float *input_data, float *output_data, EluParameter *parameter, int task_id); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/gatherNd.c b/mindspore/lite/nnacl/fp32/gatherNd.c index 56b332b308..1a0dc7120a 100644 --- a/mindspore/lite/nnacl/fp32/gatherNd.c +++ b/mindspore/lite/nnacl/fp32/gatherNd.c @@ -18,7 +18,7 @@ #include <string.h> #include "nnacl/errorcode.h" -int GatherNd(const float *input, float *output, int *in_offset, int area, int count) { +int GatherNd(const float *input, float *output, const int *in_offset, int area, int count) { int i = 0; for (i = 0; i < count; i++) { (void)memcpy(output + area * i, input + in_offset[i], area * sizeof(float)); diff --git a/mindspore/lite/nnacl/fp32/gatherNd.h b/mindspore/lite/nnacl/fp32/gatherNd.h index 701000254c..2d6c651352 100644 --- a/mindspore/lite/nnacl/fp32/gatherNd.h +++ b/mindspore/lite/nnacl/fp32/gatherNd.h @@ -27,7 +27,7 @@ typedef struct GatherNdParameter { #ifdef __cplusplus extern "C" { #endif -int GatherNd(const float *input, float *output, int *in_offset, int area, int count); +int GatherNd(const float *input, float *output, const int *in_offset, int area, int count); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp32/lstm.c b/mindspore/lite/nnacl/fp32/lstm.c index b694c66c98..d90e405347 100644 --- a/mindspore/lite/nnacl/fp32/lstm.c +++ b/mindspore/lite/nnacl/fp32/lstm.c @@ -79,13 +79,13 @@ void ElementMulAcc(const float *input0, const float *input1, float *output, int } } -void UpdataState(float *cell_state, float *forget_gate, const float *input_gate, float *cell_gate, int batch, +void UpdataState(float *cell_state, const float *forget_gate, const float *input_gate, float *cell_gate, int batch, int hidden_size) { ElementMul(forget_gate, cell_state, cell_state, batch * hidden_size); ElementMulAcc(input_gate, cell_gate, cell_state, batch * hidden_size); } -void UpdataOutput(const float *cell_state, float *output_gate, float *hidden_state, int batch, int hidden_size) { +void UpdataOutput(const float *cell_state, const float *output_gate, float *hidden_state, int batch, int hidden_size) { Tanh(cell_state, batch * hidden_size, hidden_state); ElementMul(hidden_state, output_gate, hidden_state, batch * hidden_size); } diff --git a/mindspore/lite/nnacl/fp32/resize.c b/mindspore/lite/nnacl/fp32/resize.c index 5cf7beeeb8..28d0f80750 100644 --- a/mindspore/lite/nnacl/fp32/resize.c +++ b/mindspore/lite/nnacl/fp32/resize.c @@ -65,7 +65,7 @@ int PrepareResizeBilinear(const int *input_shape, const int *output_shape, bool } int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + const int *y_bottoms, const int *y_tops,
int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights, int n_h_begin, int n_h_end) { if (input_data == NULL || output_data == NULL || input_shape == NULL || output_shape == NULL || y_bottoms == NULL || y_tops == NULL || x_lefts == NULL || x_rights == NULL || y_bottom_weights == NULL || x_left_weights == NULL) { @@ -154,7 +154,7 @@ int ResizeBilinear(const float *input_data, float *output_data, const int *input return NNACL_OK; } -int InterpRow(const float *src_line, float *linear_output, int new_width, float *x_left_weights, int *x_lefts, +int InterpRow(const float *src_line, float *linear_output, int new_width, float *x_left_weights, const int *x_lefts, const int *x_rights, int in_c) { int w; for (w = 0; w < new_width; w++) { @@ -208,7 +208,7 @@ int InterpCol(const float *bottom_line, const float *top_line, float *output, in } int ResizeBilinear2(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + const int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights, float *line0, float *line1, int n_h_begin, int n_h_end) { if (input_data == NULL || output_data == NULL || input_shape == NULL || output_shape == NULL || y_bottoms == NULL || y_tops == NULL || x_lefts == NULL || x_rights == NULL || y_bottom_weights == NULL || x_left_weights == NULL) { diff --git a/mindspore/lite/nnacl/fp32/resize.h b/mindspore/lite/nnacl/fp32/resize.h index 3332d0c315..e9114d042a 100644 --- a/mindspore/lite/nnacl/fp32/resize.h +++ b/mindspore/lite/nnacl/fp32/resize.h @@ -30,11 +30,11 @@ int PrepareResizeBilinear(const int *input_shape, const int *output_shape, bool int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights); int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + const int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights, int n_h_begin, int n_h_end); int ResizeBilinear2(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + const int *y_bottoms, const int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, float *x_left_weights, float *line0, float *line1, int n_h_begin, int n_h_end); int ResizeNearestNeighbor(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, diff --git a/mindspore/lite/nnacl/fp32/scale.c b/mindspore/lite/nnacl/fp32/scale.c index 87d2fa79c6..008096989a 100644 --- a/mindspore/lite/nnacl/fp32/scale.c +++ b/mindspore/lite/nnacl/fp32/scale.c @@ -18,8 +18,8 @@ #ifdef ENABLE_ARM #include <arm_neon.h> #endif -void ScaleInner(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, - int axis_size, int inner_size) { +void ScaleInner(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start, + int outer_end, int axis_size, int inner_size) { for (int out = outer_start; out < outer_end; out++) { int out_offset = out * axis_size * inner_size; for (int i = 0; i < axis_size; i++) { @@ -43,7 +43,7 @@ void ScaleInner(float *in_data, float *out_data, const float *scale, float *offs } } -void ScaleAxis(float
*in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, +void ScaleAxis(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start, int outer_end, int axis_size) { for (int out = outer_start; out < outer_end; out++) { int out_offset = out * axis_size; @@ -65,7 +65,8 @@ void ScaleAxis(float *in_data, float *out_data, const float *scale, float *offse } } -void DoScale(float *in_data, float *out_data, float *scale, float *offset, int task_id, ScaleParameter *scale_param) { +void DoScale(float *in_data, float *out_data, const float *scale, float *offset, int task_id, + ScaleParameter *scale_param) { int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_); int outer_start = task_id * outer_step; int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_); @@ -78,8 +79,8 @@ void DoScale(float *in_data, float *out_data, float *scale, float *offset, int t } } -void ScaleInnerRelu(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, - int axis_size, int inner_size) { +void ScaleInnerRelu(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start, + int outer_end, int axis_size, int inner_size) { #ifdef ENABLE_ARM64 float32x4_t zeros = {0, 0, 0, 0}; #endif @@ -108,8 +109,8 @@ void ScaleInnerRelu(float *in_data, float *out_data, const float *scale, float * } } -void ScaleAxisRelu(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, - int axis_size) { +void ScaleAxisRelu(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start, + int outer_end, int axis_size) { #ifdef ENABLE_ARM64 float32x4_t zeros = {0, 0, 0, 0}; #endif @@ -135,7 +136,7 @@ void ScaleAxisRelu(float *in_data, float *out_data, const float *scale, float *o } } -void DoScaleRelu(float *in_data, float *out_data, float *scale, float *offset, int task_id, +void DoScaleRelu(float *in_data, float *out_data, const float *scale, float *offset, int task_id, ScaleParameter *scale_param) { int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_); int outer_start = task_id * outer_step; @@ -149,8 +150,8 @@ void DoScaleRelu(float *in_data, float *out_data, float *scale, float *offset, i } } -void ScaleInnerRelu6(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, - int axis_size, int inner_size) { +void ScaleInnerRelu6(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start, + int outer_end, int axis_size, int inner_size) { #ifdef ENABLE_ARM64 float32x4_t zeros = {0, 0, 0, 0}; float32x4_t bounds = {6, 6, 6, 6}; @@ -180,8 +181,8 @@ void ScaleInnerRelu6(float *in_data, float *out_data, const float *scale, float } } -void ScaleAxisRelu6(float *in_data, float *out_data, const float *scale, float *offset, int outer_start, int outer_end, - int axis_size) { +void ScaleAxisRelu6(float *in_data, float *out_data, const float *scale, const float *offset, int outer_start, + int outer_end, int axis_size) { #ifdef ENABLE_ARM64 float32x4_t zeros = {0, 0, 0, 0}; float32x4_t bounds = {6, 6, 6, 6}; @@ -208,7 +209,7 @@ void ScaleAxisRelu6(float *in_data, float *out_data, const float *scale, float * } } -void DoScaleRelu6(float *in_data, float *out_data, float *scale, float *offset, int task_id, +void DoScaleRelu6(float *in_data, float *out_data, const float *scale, float *offset, int 
task_id, ScaleParameter *scale_param) { int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_); int outer_start = task_id * outer_step; diff --git a/mindspore/lite/nnacl/fp32/scale.h b/mindspore/lite/nnacl/fp32/scale.h index f1474e421c..53b28a4210 100644 --- a/mindspore/lite/nnacl/fp32/scale.h +++ b/mindspore/lite/nnacl/fp32/scale.h @@ -22,10 +22,11 @@ #ifdef __cplusplus extern "C" { #endif -void DoScale(float *in_data, float *out_data, float *scale, float *offset, int task_id, ScaleParameter *scale_param); -void DoScaleRelu(float *in_data, float *out_data, float *scale, float *offset, int task_id, +void DoScale(float *in_data, float *out_data, const float *scale, float *offset, int task_id, + ScaleParameter *scale_param); +void DoScaleRelu(float *in_data, float *out_data, const float *scale, float *offset, int task_id, ScaleParameter *scale_param); -void DoScaleRelu6(float *in_data, float *out_data, float *scale, float *offset, int task_id, +void DoScaleRelu6(float *in_data, float *out_data, const float *scale, float *offset, int task_id, ScaleParameter *scale_param); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/fp32/softmax.c b/mindspore/lite/nnacl/fp32/softmax.c index 484f9777bf..2315ce8c90 100644 --- a/mindspore/lite/nnacl/fp32/softmax.c +++ b/mindspore/lite/nnacl/fp32/softmax.c @@ -16,7 +16,6 @@ #include "nnacl/fp32/softmax.h" #include <math.h> -#include <float.h> // output = exp(input) / reduce_sum(exp(input), axis) void Softmax(const float *input_ptr, float *output_ptr, float *sum_data, SoftmaxParameter *parameter) { diff --git a/mindspore/lite/nnacl/fp32/space_to_batch.c b/mindspore/lite/nnacl/fp32/space_to_batch.c index fa1967abc5..3ae61e9ffc 100644 --- a/mindspore/lite/nnacl/fp32/space_to_batch.c +++ b/mindspore/lite/nnacl/fp32/space_to_batch.c @@ -16,7 +16,8 @@ #include "nnacl/fp32/space_to_batch.h" #include "nnacl/arithmetic_common.h" -void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape, int *out_shape) { +void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape, + const int *out_shape) { int out_dim0 = out_shape[0]; int out_dim1 = out_shape[1]; int out_dim2 = out_shape[2]; @@ -45,7 +46,8 @@ void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_size } } -void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding, int *out_shape) { +void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding, + const int *out_shape) { int in_h = in_shape[1]; int in_w = in_shape[2]; int in_c = in_shape[3]; diff --git a/mindspore/lite/nnacl/fp32/space_to_batch.h b/mindspore/lite/nnacl/fp32/space_to_batch.h index 65f16e3df0..7a5b26cc68 100644 --- a/mindspore/lite/nnacl/fp32/space_to_batch.h +++ b/mindspore/lite/nnacl/fp32/space_to_batch.h @@ -30,8 +30,9 @@ typedef struct SpaceToBatchParameter { #ifdef __cplusplus extern "C" { #endif -void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape, int *out_shape); -void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding, int *out_shape); +void DoSpaceToBatchNHWC(const float *input, float *output, const int *block_sizes, int *in_shape, const int *out_shape); +void DoSpaceToBatchPaddingNHWC(const float *input, float *output, int *in_shape, const int *padding, + const int *out_shape); #ifdef __cplusplus } #endif diff --git
a/mindspore/lite/nnacl/fp32/space_to_depth.c b/mindspore/lite/nnacl/fp32/space_to_depth.c index e486d190bd..35d6628941 100644 --- a/mindspore/lite/nnacl/fp32/space_to_depth.c +++ b/mindspore/lite/nnacl/fp32/space_to_depth.c @@ -18,7 +18,7 @@ #include "nnacl/errorcode.h" #include "nnacl/op_base.h" -int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, int *out_shape, int shape_size, +int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, const int *out_shape, int shape_size, int block_size, int h_start, int h_end) { if (input == NULL || output == NULL) { return NNACL_NULL_PTR; diff --git a/mindspore/lite/nnacl/fp32/space_to_depth.h b/mindspore/lite/nnacl/fp32/space_to_depth.h index e626b9c1d5..a19e17f8ba 100644 --- a/mindspore/lite/nnacl/fp32/space_to_depth.h +++ b/mindspore/lite/nnacl/fp32/space_to_depth.h @@ -24,7 +24,7 @@ typedef struct SpaceToDepthParameter { #ifdef __cplusplus extern "C" { #endif -int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, int *out_shape, int shape_size, +int SpaceToDepthForNHWC(const float *input, float *output, int *in_shape, const int *out_shape, int shape_size, int block_size, int h_start, int h_end); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/fp32/sparse_to_dense.c b/mindspore/lite/nnacl/fp32/sparse_to_dense.c index 3e044f9892..ccc795b95a 100644 --- a/mindspore/lite/nnacl/fp32/sparse_to_dense.c +++ b/mindspore/lite/nnacl/fp32/sparse_to_dense.c @@ -15,7 +15,7 @@ */ #include "nnacl/fp32/sparse_to_dense.h" -void SparseToDense(int **sparse_indices, int *output_shape, const float *sparse_values, float default_value, +void SparseToDense(int **sparse_indices, const int *output_shape, const float *sparse_values, float default_value, float *output, bool isScalar, int index_start, int index_end, int out_width) { for (int i = index_start; i < index_end; i++) { for (int j = 0; j < out_width; j++) { diff --git a/mindspore/lite/nnacl/fp32/sparse_to_dense.h b/mindspore/lite/nnacl/fp32/sparse_to_dense.h index c7e9df6e68..2be2695bc5 100644 --- a/mindspore/lite/nnacl/fp32/sparse_to_dense.h +++ b/mindspore/lite/nnacl/fp32/sparse_to_dense.h @@ -21,7 +21,7 @@ #ifdef __cplusplus extern "C" { #endif -void SparseToDense(int **sparse_indices_vect, int *output_shape, const float *sparse_values, float default_value, +void SparseToDense(int **sparse_indices_vect, const int *output_shape, const float *sparse_values, float default_value, float *output, bool isScalar, int index_start, int index_end, int out_width); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/fp32/stack.c b/mindspore/lite/nnacl/fp32/stack.c index ea30d24c2f..94ab176a93 100644 --- a/mindspore/lite/nnacl/fp32/stack.c +++ b/mindspore/lite/nnacl/fp32/stack.c @@ -17,7 +17,7 @@ #include "nnacl/fp32/stack.h" #include "nnacl/arithmetic_common.h" -size_t GetStackCopyNum(int axis, int *in_shape, size_t shape_size) { +size_t GetStackCopyNum(int axis, const int *in_shape, size_t shape_size) { size_t one_input_size = 1; for (size_t i = 0; i < shape_size; ++i) { one_input_size *= in_shape[i]; @@ -68,6 +68,4 @@ void DoStackInt32(const int32_t *const *inputs, size_t input_num, int *in_shape, } } -void DoStackOneInput(const int8_t *input, int8_t *output, size_t data_size) { - memcpy(output, input, data_size); -} +void DoStackOneInput(const int8_t *input, int8_t *output, size_t data_size) { memcpy(output, input, data_size); } diff --git a/mindspore/lite/nnacl/int8/batch_to_space_int8.c b/mindspore/lite/nnacl/int8/batch_to_space_int8.c index 
dec894404e..5bd37aaf36 100644 --- a/mindspore/lite/nnacl/int8/batch_to_space_int8.c +++ b/mindspore/lite/nnacl/int8/batch_to_space_int8.c @@ -15,7 +15,6 @@ */ #include "nnacl/int8/batch_to_space_int8.h" -#include "nnacl/arithmetic_common.h" void BatchToSpaceNoCropForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, int out_n, const int *block, QuantArg *in_quant_arg, QuantArg *out_quant_arg) { diff --git a/mindspore/lite/nnacl/int8/conv_depthwise_int8.c b/mindspore/lite/nnacl/int8/conv_depthwise_int8.c index 187bf90674..78fd2cb03f 100644 --- a/mindspore/lite/nnacl/int8/conv_depthwise_int8.c +++ b/mindspore/lite/nnacl/int8/conv_depthwise_int8.c @@ -491,7 +491,7 @@ void ConvDw3x3Int8Pad(int8_t *output_data, const int8_t *input_data, const int16 /*conv depthwise sliding window perchannel int8 begin*/ void DepthwiseBorderPixelInt8(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int height, int width, int in_kh_step, int in_kw_step, int kernel_w, int8_t *input_zp, - int32_t *out_zp, int *out_multiplier, int *left_shift, const int *right_shift, + int32_t *out_zp, const int *out_multiplier, int *left_shift, const int *right_shift, int32_t *acc_min, int32_t *acc_max) { int tmp_buffer[C8NUM]; for (int i = 0; i < C8NUM; i++) { @@ -528,7 +528,7 @@ void DepthwiseBorderPixelInt8(int8_t *dst, const int8_t *src, const int16_t *wei void DepthwiseBorderInt8(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int top, int bottom, int left, int right, const ConvParameter *conv_param, const SlidingWindowParam *sliding, int8_t *in_zp, int32_t *out_zp, int *out_multiplier, - int *left_shift, int *right_shift, int32_t *acc_min, int32_t *acc_max) { + int *left_shift, const int *right_shift, int32_t *acc_min, int32_t *acc_max) { int8_t *dst_h = dst + top * sliding->out_h_step_; for (int oh = top; oh < bottom; oh++) { int ih = oh * conv_param->stride_h_ - conv_param->pad_u_; diff --git a/mindspore/lite/nnacl/int8/space_to_batch_int8.c b/mindspore/lite/nnacl/int8/space_to_batch_int8.c index 970437a281..33a9741ced 100644 --- a/mindspore/lite/nnacl/int8/space_to_batch_int8.c +++ b/mindspore/lite/nnacl/int8/space_to_batch_int8.c @@ -17,7 +17,7 @@ #include "nnacl/arithmetic_common.h" void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, int *in_shape, - int *out_shape) { + const int *out_shape) { int out_dim0 = out_shape[0]; int out_dim1 = out_shape[1]; int out_dim2 = out_shape[2]; @@ -47,7 +47,7 @@ void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *bloc } void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, const int *padding, - int *out_shape, int32_t zp) { + const int *out_shape, int32_t zp) { int in_h = in_shape[1]; int in_w = in_shape[2]; int in_c = in_shape[3]; diff --git a/mindspore/lite/nnacl/int8/space_to_batch_int8.h b/mindspore/lite/nnacl/int8/space_to_batch_int8.h index cd19b442ff..cebcb0f162 100644 --- a/mindspore/lite/nnacl/int8/space_to_batch_int8.h +++ b/mindspore/lite/nnacl/int8/space_to_batch_int8.h @@ -21,9 +21,10 @@ #ifdef __cplusplus extern "C" { #endif -void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, int *in_shape, int *out_shape); +void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, int *in_shape, + const int *out_shape); void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, int *in_shape, const int *padding, - int *out_shape, int32_t zp); + 
const int *out_shape, int32_t zp); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/minimal_filtering_generator.c b/mindspore/lite/nnacl/minimal_filtering_generator.c index a27b035993..b32c87e34b 100644 --- a/mindspore/lite/nnacl/minimal_filtering_generator.c +++ b/mindspore/lite/nnacl/minimal_filtering_generator.c @@ -54,7 +54,7 @@ void ResidueMatrix(const float *interval, float *b, int row, int col) { b[len - 1] = 1; } -int LT(float *poly_array, float *matrix_lt, int n) { +int LT(const float *poly_array, float *matrix_lt, int n) { if (n > MAX_LEN) { return NNACL_ERR; } diff --git a/mindspore/lite/nnacl/minimal_filtering_generator.h b/mindspore/lite/nnacl/minimal_filtering_generator.h index 376794c7be..14c402cad1 100644 --- a/mindspore/lite/nnacl/minimal_filtering_generator.h +++ b/mindspore/lite/nnacl/minimal_filtering_generator.h @@ -32,7 +32,7 @@ void DiagonalPlusMatrix(const float *matrix, float *diagonal_matrix, int degree) void ResidueMatrix(const float *interval, float *b, int row, int col); -int LT(float *poly_array, float *matrix_lt, int n); +int LT(const float *poly_array, float *matrix_lt, int n); void T(const float *poly_array, float *matrix_t, int n); diff --git a/mindspore/lite/nnacl/transpose.c b/mindspore/lite/nnacl/transpose.c index 4dbe7a7d9e..053f5ad3ae 100644 --- a/mindspore/lite/nnacl/transpose.c +++ b/mindspore/lite/nnacl/transpose.c @@ -18,8 +18,8 @@ #include <string.h> #include "nnacl/errorcode.h" -void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end) { +void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end) { const int stride0 = strides[perm[0]]; const int stride1 = strides[perm[1]]; const int output0 = output_shape[0]; @@ -33,8 +33,8 @@ void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out } } -void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end) { +void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end) { const int stride0 = strides[perm[0]]; const int stride1 = strides[perm[1]]; const int stride2 = strides[perm[2]]; @@ -56,8 +56,8 @@ void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out } } -void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end) { +void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end) { const int stride0 = strides[perm[0]]; const int stride1 = strides[perm[1]]; const int stride2 = strides[perm[2]]; @@ -88,8 +88,8 @@ void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out } } -void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end) { +void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end) { const int stride0 = strides[perm[0]]; const int stride1 = strides[perm[1]]; const int stride2 = strides[perm[2]]; @@ -127,8 +127,8 @@ void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out } } -void
TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end, int dims, int *size, int *position) { +void TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end, int dims, int *size, int *position) { *(size + dims - 1) = 1; for (int i = dims - 1; i > 0; --i) { *(size + i - 1) = *(size + i) * output_shape[i]; diff --git a/mindspore/lite/nnacl/transpose.h b/mindspore/lite/nnacl/transpose.h index 753d9031f1..219005ea81 100644 --- a/mindspore/lite/nnacl/transpose.h +++ b/mindspore/lite/nnacl/transpose.h @@ -34,16 +34,16 @@ extern "C" { #endif int DoTranspose(float *in_data, float *out_data, int *input_shape, int *output_shape, TransposeParameter *transpose_param, int h_start, int h_end, int *size, int *position); -void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end); -void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end); -void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end); -void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end); -void TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, int *perm, int *output_shape, - int h_start, int h_end, int dims, int *size, int *position); +void TransposeDim2(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end); +void TransposeDim3(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end); +void TransposeDim4(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end); +void TransposeDim5(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end); +void TransposeDims(float *in_data, float *out_data, const int *strides, int *out_strides, const int *perm, + int *output_shape, int h_start, int h_end, int dims, int *size, int *position); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/where.c b/mindspore/lite/nnacl/where.c index 43617b755a..666a9a79c7 100644 --- a/mindspore/lite/nnacl/where.c +++ b/mindspore/lite/nnacl/where.c @@ -15,7 +15,8 @@ */ #include "nnacl/where.h" -void Where(bool *input, float *input1, const float *input2, float *output, WhereParameter *where_param_, int task_id) { +void Where(bool *input, const float *input1, const float *input2, float *output, WhereParameter *where_param_, + int task_id) { for (int i = task_id; i < where_param_->number_; i += where_param_->op_parameter_.thread_num_) { if (input[where_param_->num_ > 1 ? i : 0] == true) { output[i] = input1[where_param_->num1_ > 1 ? 
i : 0]; diff --git a/mindspore/lite/nnacl/where.h b/mindspore/lite/nnacl/where.h index 33e4f5b73c..f405a1f0ee 100644 --- a/mindspore/lite/nnacl/where.h +++ b/mindspore/lite/nnacl/where.h @@ -30,7 +30,8 @@ typedef struct WhereParameter { #ifdef __cplusplus extern "C" { #endif -void Where(bool *input, float *input1, const float *input2, float *output, WhereParameter *where_param_, int task_id); +void Where(bool *input, const float *input1, const float *input2, float *output, WhereParameter *where_param_, + int task_id); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/winograd_utils.c b/mindspore/lite/nnacl/winograd_utils.c index f17d44323c..cbbfb13412 100644 --- a/mindspore/lite/nnacl/winograd_utils.c +++ b/mindspore/lite/nnacl/winograd_utils.c @@ -75,7 +75,7 @@ static OutputTransFunc OutputTransFuncRelu6List8[] = {NULL, OutputTransform8x6Relu6Unit, OutputTransform8x7Relu6Unit}; -void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *matrix_b, const float *matrix_bt, +void GeneralInputTransformUnit(const float *src_data, float *dst_data, const float *matrix_b, const float *matrix_bt, int src_step, int dst_step, int in_unit) { int len = in_unit * in_unit; if (len > MAX_LEN) return; @@ -112,7 +112,7 @@ void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *ma #endif } -void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, float *matrix_a, +void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, const float *matrix_a, const float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit) { int src_len = in_unit * in_unit; if (src_len > MAX_LEN) { diff --git a/mindspore/lite/nnacl/winograd_utils.h b/mindspore/lite/nnacl/winograd_utils.h index bc6f29b697..18b5783f2e 100644 --- a/mindspore/lite/nnacl/winograd_utils.h +++ b/mindspore/lite/nnacl/winograd_utils.h @@ -33,10 +33,10 @@ typedef void (*InputTransFunc)(const float *src_data, float *dst_data, int src_s typedef void (*OutputTransFunc)(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void GeneralInputTransformUnit(const float *src_data, float *dst_data, float *matrix_b, const float *matrix_bt, +void GeneralInputTransformUnit(const float *src_data, float *dst_data, const float *matrix_b, const float *matrix_bt, int src_step, int dst_step, int in_unit); -void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, float *matrix_a, +void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, const float *matrix_a, const float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit); #define Load16Data \ diff --git a/mindspore/lite/src/common/file_utils.cc b/mindspore/lite/src/common/file_utils.cc index c32ecd9605..a84a175c5f 100644 --- a/mindspore/lite/src/common/file_utils.cc +++ b/mindspore/lite/src/common/file_utils.cc @@ -85,7 +85,7 @@ std::string RealPath(const char *path) { return res; } -int CompareOutputData(const float *output_data, size_t output_size, float *correct_data, size_t data_size) { +int CompareOutputData(const float *output_data, size_t output_size, const float *correct_data, size_t data_size) { if (output_size != data_size) { printf("compare failed, output_size %zu isn't equal to data_size %zu.\n", output_size, data_size); return 0; @@ -107,7 +107,7 @@ int CompareOutputData(const float *output_data, size_t 
output_size, float *corre return 0; } -int CompareOutput(float *output_data, size_t output_num, std::string file_path) { +int CompareOutput(const float *output_data, size_t output_num, std::string file_path) { size_t ground_truth_size; auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &ground_truth_size)); size_t ground_truth_num = ground_truth_size / sizeof(float); diff --git a/mindspore/lite/src/common/file_utils.h b/mindspore/lite/src/common/file_utils.h index aadf7f6e5b..b9cae19ced 100644 --- a/mindspore/lite/src/common/file_utils.h +++ b/mindspore/lite/src/common/file_utils.h @@ -58,8 +58,8 @@ inline int WriteToBin(const std::string &file_path, void *data, size_t size) { return 0; } -int CompareOutputData(const float *output_data, size_t output_num, float *correct_data, size_t data_size); -int CompareOutput(float *output_data, size_t output_num, std::string file_path); +int CompareOutputData(const float *output_data, size_t output_num, const float *correct_data, size_t data_size); +int CompareOutput(const float *output_data, size_t output_num, std::string file_path); std::string GetAndroidPackageName(); std::string GetAndroidPackagePath(); diff --git a/mindspore/lite/src/common/file_utils_ext.cc b/mindspore/lite/src/common/file_utils_ext.cc index c8110619ad..036b2fd62f 100644 --- a/mindspore/lite/src/common/file_utils_ext.cc +++ b/mindspore/lite/src/common/file_utils_ext.cc @@ -21,7 +21,7 @@ namespace mindspore { namespace lite { -static float CompareOutputRelativeData(const float *output_data, float *correct_data, int data_size) { +static float CompareOutputRelativeData(const float *output_data, const float *correct_data, int data_size) { float error = 0; // relative error @@ -38,7 +38,7 @@ static float CompareOutputRelativeData(const float *output_data, float *correct_ return error; } -int CompareRelativeOutput(float *output_data, std::string file_path) { +int CompareRelativeOutput(const float *output_data, std::string file_path) { size_t output_size; auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size)); if (ground_truth == nullptr) { @@ -53,7 +53,7 @@ int CompareRelativeOutput(float *output_data, std::string file_path) { return 0; } -float RelativeOutputError(float *output_data, std::string file_path) { +float RelativeOutputError(const float *output_data, std::string file_path) { size_t output_size; auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size)); size_t output_num = output_size / sizeof(float); diff --git a/mindspore/lite/src/common/file_utils_ext.h b/mindspore/lite/src/common/file_utils_ext.h index 2f6561ffbf..f81c2e434c 100644 --- a/mindspore/lite/src/common/file_utils_ext.h +++ b/mindspore/lite/src/common/file_utils_ext.h @@ -20,8 +20,8 @@ namespace mindspore { namespace lite { -int CompareRelativeOutput(float *output_data, std::string file_path); -float RelativeOutputError(float *output_data, std::string file_path); +int CompareRelativeOutput(const float *output_data, std::string file_path); +float RelativeOutputError(const float *output_data, std::string file_path); } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_COMMON_FILE_UTILS_EXT_H_ diff --git a/mindspore/lite/src/ops/assign_add.cc b/mindspore/lite/src/ops/assign_add.cc index fe3c874fba..b65f6d23e4 100644 --- a/mindspore/lite/src/ops/assign_add.cc +++ b/mindspore/lite/src/ops/assign_add.cc @@ -37,6 +37,7 @@ int AssignAdd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &
<< schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal" << schema::EnumNamePrimitiveType(schema::PrimitiveType_AssignAdd); delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } if (this->primitive_->value.value == nullptr) { @@ -44,6 +45,7 @@ int AssignAdd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> & if (this->primitive_->value.value == nullptr) { MS_LOG(ERROR) << "new primitiveT value failed"; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } } diff --git a/mindspore/lite/src/ops/binary_cross_entropy_grad.cc b/mindspore/lite/src/ops/binary_cross_entropy_grad.cc index 3b161ad4dc..1e61641ef1 100644 --- a/mindspore/lite/src/ops/binary_cross_entropy_grad.cc +++ b/mindspore/lite/src/ops/binary_cross_entropy_grad.cc @@ -43,6 +43,7 @@ int BinaryCrossEntropyGrad::UnPackAttr(const Primitive &prim, const std::vector< << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal" << schema::EnumNamePrimitiveType(schema::PrimitiveType_BinaryCrossEntropyGrad); delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } if (this->primitive_->value.value == nullptr) { @@ -58,6 +59,8 @@ int BinaryCrossEntropyGrad::UnPackAttr(const Primitive &prim, const std::vector< MS_LOG(ERROR) << "get reduction failed!"; delete this->primitive_; delete attr; + this->primitive_ = nullptr; + attr = nullptr; return RET_ERROR; } else { reduction = GetValue<std::string>(prim.GetAttr("reduction")); } diff --git a/mindspore/lite/src/ops/control_depend.cc b/mindspore/lite/src/ops/control_depend.cc index f4acf6428b..c5296bdd17 100644 --- a/mindspore/lite/src/ops/control_depend.cc +++ b/mindspore/lite/src/ops/control_depend.cc @@ -32,6 +32,7 @@ int ControlDepend::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> if (this->primitive_->value.type != schema::PrimitiveType_ControlDepend) { MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } if (this->primitive_->value.value == nullptr) { @@ -39,6 +40,7 @@ int ControlDepend::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> if (attr == nullptr) { MS_LOG(ERROR) << "new primitiveT value failed"; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } this->primitive_->value.value = attr; diff --git a/mindspore/lite/src/ops/expand_dims.cc b/mindspore/lite/src/ops/expand_dims.cc index 8d4608cb2a..ee8bc767cc 100644 --- a/mindspore/lite/src/ops/expand_dims.cc +++ b/mindspore/lite/src/ops/expand_dims.cc @@ -39,12 +39,14 @@ int ExpandDims::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> if (this->primitive_->value.type != schema::PrimitiveType_ExpandDims) { MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } if (this->primitive_->value.value == nullptr) { auto attr = new (std::nothrow) schema::ExpandDimsT(); if (attr == nullptr) { delete this->primitive_; + this->primitive_ = nullptr; MS_LOG(ERROR) << "new primitiveT value failed"; return RET_ERROR; } @@ -57,6 +59,8 @@ int ExpandDims::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> MS_LOG(ERROR) << "input axis is not value node."; delete this->primitive_; delete attr; + this->primitive_ = nullptr; + attr = nullptr; return RET_ERROR; } this->primitive_->value.value = attr; diff --git a/mindspore/lite/src/ops/gather.cc b/mindspore/lite/src/ops/gather.cc index 93f28be14b..16245068fb 100644 --- a/mindspore/lite/src/ops/gather.cc +++ b/mindspore/lite/src/ops/gather.cc @@ -44,6 +44,7 @@ int Gather::UnPackAttr(const Primitive
&prim, const std::vector<AnfNodePtr> &inp MS_LOG(ERROR) << "Gather primitive value type : " << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal" << schema::EnumNamePrimitiveType(schema::PrimitiveType_Gather); delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } if (this->primitive_->value.value == nullptr) { @@ -52,6 +53,8 @@ int Gather::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp MS_LOG(ERROR) << "new primitive value.value error"; delete this->primitive_; delete gather_attr; + this->primitive_ = nullptr; + gather_attr = nullptr; return RET_ERROR; } if (inputs[2]->isa<ValueNode>()) { diff --git a/mindspore/lite/src/ops/oneslike.cc b/mindspore/lite/src/ops/oneslike.cc index 419c7be533..d447de56b9 100644 --- a/mindspore/lite/src/ops/oneslike.cc +++ b/mindspore/lite/src/ops/oneslike.cc @@ -37,6 +37,7 @@ int OnesLike::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &i << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal" << schema::EnumNamePrimitiveType(schema::PrimitiveType_OnesLike); delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } if (this->primitive_->value.value == nullptr) { @@ -44,6 +45,7 @@ int OnesLike::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &i if (this->primitive_->value.value == nullptr) { MS_LOG(ERROR) << "new primitiveT value failed"; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } } diff --git a/mindspore/lite/src/ops/populate/layer_norm_populate.cc b/mindspore/lite/src/ops/populate/layer_norm_populate.cc index 74d94e1630..a2ae098d04 100644 --- a/mindspore/lite/src/ops/populate/layer_norm_populate.cc +++ b/mindspore/lite/src/ops/populate/layer_norm_populate.cc @@ -35,6 +35,8 @@ OpParameter *PopulateLayerNormParameter(const mindspore::lite::PrimitiveC *primi layer_norm_parameter->normalized_shape_ = reinterpret_cast<int *>(malloc(normalized_shape.size() * sizeof(int))); if (layer_norm_parameter->normalized_shape_ == nullptr) { MS_LOG(ERROR) << "malloc layer_norm_parameter->normalized_shape_ failed."; + free(layer_norm_parameter); + layer_norm_parameter = nullptr; return nullptr; } for (size_t i = 0; i < normalized_shape.size(); i++) { diff --git a/mindspore/lite/src/ops/power.cc b/mindspore/lite/src/ops/power.cc index 7a59d4544e..f99bfa8124 100644 --- a/mindspore/lite/src/ops/power.cc +++ b/mindspore/lite/src/ops/power.cc @@ -43,6 +43,7 @@ int Power::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inpu if (this->primitive_->value.type != schema::PrimitiveType_Power) { MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } if (this->primitive_->value.value == nullptr) { @@ -50,6 +51,7 @@ int Power::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inpu if (attr == nullptr) { MS_LOG(ERROR) << "new primitiveT value failed"; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } diff --git a/mindspore/lite/src/ops/resize.cc b/mindspore/lite/src/ops/resize.cc index 991f99f2ee..a2638be972 100644 --- a/mindspore/lite/src/ops/resize.cc +++ b/mindspore/lite/src/ops/resize.cc @@ -63,9 +63,7 @@ int Resize::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp } else if (prim.instance_name() == "ResizeBilinear") { attr->method = schema::ResizeMethod_LINEAR; } else { - if (attr != nullptr) { - delete attr; - } + delete attr; MS_LOG(ERROR) << "wrong resize type"; return RET_ERROR; } diff --git a/mindspore/lite/src/ops/sub.cc
b/mindspore/lite/src/ops/sub.cc index 4b4282f38b..5fd57344a7 100644 --- a/mindspore/lite/src/ops/sub.cc +++ b/mindspore/lite/src/ops/sub.cc @@ -41,6 +41,7 @@ int Sub::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs if (this->primitive_->value.type != schema::PrimitiveType_Sub) { MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } if (this->primitive_->value.value == nullptr) { @@ -48,6 +49,7 @@ int Sub::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs if (attr == nullptr) { MS_LOG(ERROR) << "new primitiveT value failed"; delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } // todo: confirm the activationType diff --git a/mindspore/lite/src/ops/unsorted_segment_sum.cc b/mindspore/lite/src/ops/unsorted_segment_sum.cc index d60d30a48e..3356128d5d 100644 --- a/mindspore/lite/src/ops/unsorted_segment_sum.cc +++ b/mindspore/lite/src/ops/unsorted_segment_sum.cc @@ -41,6 +41,7 @@ int UnsortedSegmentSum::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal" << schema::EnumNamePrimitiveType(schema::PrimitiveType_UnsortedSegmentSum); delete this->primitive_; + this->primitive_ = nullptr; return RET_ERROR; } if (this->primitive_->value.value == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h index f855599f85..128d2b851c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.h @@ -171,15 +171,15 @@ class ArithmeticCPUKernel : public LiteKernel { private: int BroadcastRun(void *input0, void *input1, void *output, int dim, int out_count, int out_thread_stride); - int break_pos_; - int outside_; - int thread_count_; - ArithmeticParameter *arithmeticParameter_; + int break_pos_ = 0; + int outside_ = 0; + int thread_count_ = 1; + ArithmeticParameter *arithmeticParameter_ = nullptr; ArithmeticRun arithmetic_run_ = nullptr; ArithmeticOptRun arithmetic_opt_run_ = nullptr; ArithmeticIntRun arithmetic_run_int_ = nullptr; ArithmeticOptIntRun arithmetic_opt_run_int_ = nullptr; - LiteDataType data_type_; + LiteDataType data_type_ = kDataTypeFloat; }; } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_ARITHMETIC_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.cc index 41e2e871f5..d4d4cecdc5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd.cc @@ -68,22 +68,24 @@ void DeConvolutionWinogradCPUKernel::FreeResizeBuf() { } void DeConvolutionWinogradCPUKernel::FreeDeconvParam() { - for (int i = 0; i < deconv_param_->compute_size_; i++) { - DeConvComputeUnit &unit = deconv_param_->compute_units_[i]; - - if (unit.weight_ != nullptr) { - free(unit.weight_); - unit.weight_ = nullptr; - } + if (deconv_param_ != nullptr) { + for (int i = 0; i < deconv_param_->compute_size_; i++) { + DeConvComputeUnit &unit = deconv_param_->compute_units_[i]; - if (unit.use_winograd_) { - if (unit.winograd_.AT_ != nullptr) { - free(unit.winograd_.AT_); - unit.winograd_.AT_ = nullptr; + if (unit.weight_ != nullptr) { + free(unit.weight_); + unit.weight_ = nullptr; } - if (unit.winograd_.BT_ != nullptr) { - free(unit.winograd_.BT_); - unit.winograd_.BT_ = nullptr; + + if
(unit.use_winograd_) { + if (unit.winograd_.AT_ != nullptr) { + free(unit.winograd_.AT_); + unit.winograd_.AT_ = nullptr; + } + if (unit.winograd_.BT_ != nullptr) { + free(unit.winograd_.BT_); + unit.winograd_.BT_ = nullptr; + } } } } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc index e894a51f70..df64fe3670 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc @@ -204,7 +204,7 @@ int MirrorPadImplInt8(void *cdata, int task_id) { return RET_OK; } -int PadInt8CPUKernel::CheckPaddings(const int *paddings, int length, int *input_shape, int mode) { +int PadInt8CPUKernel::CheckPaddings(const int *paddings, int length, const int *input_shape, int mode) { if (paddings == nullptr || input_shape == nullptr) { return RET_NULL_PTR; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h index bb811ee94f..1062b47ced 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h @@ -48,7 +48,7 @@ class PadInt8CPUKernel : public LiteKernel { private: int HandleMirrorPad(); - int CheckPaddings(const int *paddings, int length, int *input_shape, int mode); + int CheckPaddings(const int *paddings, int length, const int *input_shape, int mode); int CopyPaddingFromInput(); void CalculateStrides(); int ExtendPaddings(int *paddings, int length, const int *ori_paddings, int ori_length);