From: @yangruoqi713
Reviewed-by: @hangangqiang, @zhanghaibo5
Signed-off-by: @zhanghaibo5
pull/15688/MERGE
@@ -89,9 +89,9 @@ int ArithmeticInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
   if (input_shape0_size >= MAX_SHAPE_SIZE || input_shape1_size >= MAX_SHAPE_SIZE) {
     return NNACL_ERR;
   }
-  int in_shape0[10];
-  int in_shape1[10];
-  int out_shape[10];
+  int in_shape0[10] = {0};
+  int in_shape1[10] = {0};
+  int out_shape[10] = {0};
   int ndim = input_shape0_size;
   UpdateInputShape(input_shape0_size, input_shape1_size, &ndim, input_shape0, input_shape1, in_shape0, in_shape1);
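Note on the `= {0}` changes (the same fix recurs in the ReduceInferShape and StridedSlice hunks below): `UpdateInputShape` only writes the leading `ndim` slots, so any later read of the trailing slots of an uninitialized stack array is undefined behavior. A minimal sketch, with a made-up `FillShape` standing in for `UpdateInputShape`:

```cpp
#include <cstdio>

// Illustrative helper (not the NNACL code): fills only the first `ndim`
// slots, mirroring how UpdateInputShape pads/copies shapes.
static void FillShape(int *shape, int ndim) {
  for (int i = 0; i < ndim; ++i) {
    shape[i] = i + 1;  // only the first `ndim` entries are written
  }
}

int main() {
  int shape_uninit[10];        // trailing slots hold indeterminate values
  int shape_zeroed[10] = {0};  // every slot starts at a defined 0
  FillShape(shape_uninit, 4);
  FillShape(shape_zeroed, 4);
  // Reading shape_uninit[4..9] is undefined behavior; shape_zeroed[4..9]
  // is a well-defined 0, which is why the patch adds `= {0}`.
  printf("%d %d\n", shape_zeroed[0], shape_zeroed[9]);  // prints "1 0"
  return 0;
}
```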
@@ -23,7 +23,7 @@ unsigned Log2Ceil(unsigned length) {
   }
   int floor = 0;
   for (int i = 4; i >= 0; --i) {
-    const unsigned shift = (1 << i);
+    const unsigned shift = (1 << (unsigned)i);
     unsigned tmp = length >> shift;
     if (tmp != 0) {
       length = tmp;
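Note: the cast changes the type of the shift expression, not its value; `i` stays in [0, 4], so `1 << i` was already well-defined, and the `(unsigned)` cast keeps the arithmetic unsigned end to end for static checkers. A self-contained sketch of the surrounding ceil(log2) routine; the post-loop rounding step is reconstructed, not copied from NNACL:

```cpp
#include <cstdio>

// Minimal sketch of the binary-search ceil(log2(n)) idiom this hunk touches.
// The loop computes floor(log2) by testing 16-, 8-, 4-, 2-, 1-bit shifts.
unsigned Log2Ceil(unsigned length) {
  if (length == 0) {
    return 0;
  }
  unsigned value = length;
  unsigned floor_log2 = 0;
  for (int i = 4; i >= 0; --i) {
    // The cast keeps the shift expression unsigned end to end, which is
    // what the patched line `(1 << (unsigned)i)` is after.
    const unsigned shift = (1u << (unsigned)i);
    unsigned tmp = value >> shift;
    if (tmp != 0) {
      value = tmp;
      floor_log2 += shift;
    }
  }
  // Round up when length is not an exact power of two.
  const bool pow2 = (length & (length - 1)) == 0;
  return pow2 ? floor_log2 : floor_log2 + 1;
}

int main() {
  printf("%u %u %u\n", Log2Ceil(8), Log2Ceil(9), Log2Ceil(1));  // 3 4 0
  return 0;
}
```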
@@ -415,9 +415,12 @@ int VectorCPush(VectorC *vc, int value) {
   return NNACL_OK;
 }
-void VectorCInsert(VectorC *vc, int index, int value) {
+int VectorCInsert(VectorC *vc, int index, int value) {
   if (vc->size_ + 1 > vc->max_size_) {
     int *tmp = (int *)malloc(vc->per_malloc_size_ * sizeof(int) + vc->max_size_ * sizeof(int));
+    if (tmp == NULL) {
+      return NNACL_ERR;
+    }
     memcpy(tmp, vc->data_, vc->size_ * sizeof(int));
     free(vc->data_);
     vc->data_ = tmp;
@@ -426,6 +429,7 @@ void VectorCInsert(VectorC *vc, int index, int value) {
   memmove(vc->data_ + index + 1, vc->data_ + index, (vc->size_ - index) * sizeof(int));
   vc->data_[index] = value;
   vc->size_++;
+  return NNACL_OK;
 }
 void VectorCErase(VectorC *vc, int index) {
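Note: with the `int` return type, a failed grow in `VectorCInsert` can finally be reported instead of crashing in `memcpy` on a NULL buffer. A condensed, compilable sketch of the whole pattern (struct fields follow the diff; the sizes in the demo are made up):

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

#define NNACL_OK 0
#define NNACL_ERR 1

// Field names mirror the diff; per_malloc_size_ is the growth increment.
typedef struct VectorC {
  int *data_;
  size_t size_;
  size_t max_size_;
  size_t per_malloc_size_;
} VectorC;

// Status-returning insert, as in the patch: a failed grow now reports
// NNACL_ERR instead of dereferencing a NULL buffer.
int VectorCInsert(VectorC *vc, int index, int value) {
  if (vc->size_ + 1 > vc->max_size_) {
    int *tmp = (int *)malloc((vc->per_malloc_size_ + vc->max_size_) * sizeof(int));
    if (tmp == NULL) {
      return NNACL_ERR;  // propagate allocation failure to the caller
    }
    memcpy(tmp, vc->data_, vc->size_ * sizeof(int));
    free(vc->data_);
    vc->data_ = tmp;
    vc->max_size_ += vc->per_malloc_size_;
  }
  memmove(vc->data_ + index + 1, vc->data_ + index, (vc->size_ - index) * sizeof(int));
  vc->data_[index] = value;
  vc->size_++;
  return NNACL_OK;
}

int main() {
  VectorC vc = {(int *)malloc(4 * sizeof(int)), 0, 4, 4};
  for (int i = 0; i < 6; ++i) {
    if (VectorCInsert(&vc, i, i * 10) != NNACL_OK) {
      fprintf(stderr, "insert failed\n");
      break;
    }
  }
  for (size_t i = 0; i < vc.size_; ++i) printf("%d ", vc.data_[i]);  // 0 10 20 30 40 50
  free(vc.data_);
  return 0;
}
```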
@@ -200,7 +200,7 @@ int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou
 int VectorCInit(VectorC *vc, size_t per_malloc_size);
 int VectorCSet(VectorC *vc, const int *src_shape, size_t src_shape_size);
 int VectorCPush(VectorC *vc, int value);
-void VectorCInsert(VectorC *vc, int index, int value);
+int VectorCInsert(VectorC *vc, int index, int value);
 void VectorCErase(VectorC *vc, int index);
 bool VectorCEqual(VectorC *vc1, VectorC *vc2);
 void VectorCFree(VectorC *vc);
@@ -81,6 +81,9 @@ int MatmulInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
   int c_shape[MAX_SHAPE_SIZE];
   size_t c_shape_size = 0;
   ShapeSet(c_shape, &c_shape_size, a_shape, a_shape_size);
+  if (c_shape_size < 1 || b_shape_size < 1) {
+    return NNACL_ERR;
+  }
   c_shape[c_shape_size - 1] = b_shape[b_shape_size - 1];
   if (del_start) {
     ShapeErase(c_shape, &c_shape_size, 0);
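Note: `c_shape_size` is a `size_t`, so without the new guard an empty shape makes `c_shape_size - 1` wrap around instead of going negative:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  // Why the guard matters: c_shape_size is a size_t, so subtracting 1
  // from 0 wraps around rather than producing -1.
  size_t c_shape_size = 0;
  size_t last = c_shape_size - 1;  // wraps to SIZE_MAX
  printf("%zu\n", last);           // 18446744073709551615 on a 64-bit target
  // Indexing c_shape[last] would be a far out-of-bounds write, hence the
  // new `if (c_shape_size < 1 || b_shape_size < 1) return NNACL_ERR;`.
  return 0;
}
```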
@@ -88,7 +88,7 @@ int ReduceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
   }
   int rank = (int)(input->shape_size_);
-  int actual_axes[MAX_SHAPE_SIZE];
+  int actual_axes[MAX_SHAPE_SIZE] = {0};
   size_t actual_axes_size = 0;
   ShapeSet(actual_axes, &actual_axes_size, axes, num_axes);
@@ -81,59 +81,52 @@ int CalShapeByType(const TensorC *const *inputs, size_t shape_size, int *out_sha
   if (shape_size == 0) {
     return NNACL_ERR;
   }
+  int *data_int = (int *)malloc(sizeof(int) * shape_size);
+  if (data_int == NULL) {
+    return NNACL_ERR;
+  }
   switch (shape_tensor->data_type_) {
     case kNumberTypeInt8: {
       int8_t *data = (int8_t *)(shape_tensor->data_);
-      int *data_int = (int *)malloc(sizeof(int) * shape_size);
       for (size_t i = 0; i < shape_size; i++) {
         data_int[i] = data[i];
       }
       CalShape(data_int, inputs, out_shape, out_shape_size, shape_size);
-      free(data_int);
     } break;
     case kNumberTypeInt32: {
       int32_t *data = (int32_t *)(shape_tensor->data_);
-      int *data_int = (int *)malloc(sizeof(int) * shape_size);
       for (size_t i = 0; i < shape_size; i++) {
         data_int[i] = data[i];
       }
       CalShape(data_int, inputs, out_shape, out_shape_size, shape_size);
-      free(data_int);
     } break;
     case kNumberTypeInt64: {
       int64_t *data = (int64_t *)(shape_tensor->data_);
-      int *data_int = (int *)malloc(sizeof(int) * shape_size);
       for (size_t i = 0; i < shape_size; i++) {
         data_int[i] = data[i];
       }
       CalShape(data_int, inputs, out_shape, out_shape_size, shape_size);
-      free(data_int);
     } break;
     case kNumberTypeFloat: {
       float *data = (float *)(shape_tensor->data_);
-      int *data_int = (int *)malloc(sizeof(int) * shape_size);
       for (size_t i = 0; i < shape_size; i++) {
         data_int[i] = data[i];
       }
       CalShape(data_int, inputs, out_shape, out_shape_size, shape_size);
-      free(data_int);
     } break;
     case kNumberTypeUInt32: {
       uint32_t *data = (uint32_t *)(shape_tensor->data_);
-      int *data_int = (int *)malloc(sizeof(int) * shape_size);
-      if (data_int == NULL) {
-        return NNACL_ERR;
-      }
       for (size_t i = 0; i < shape_size; i++) {
         data_int[i] = data[i];
       }
       CalShape(data_int, inputs, out_shape, out_shape_size, shape_size);
-      free(data_int);
     } break;
     default: {
+      free(data_int);
      return NNACL_ERR;
     }
   }
+  free(data_int);
   return NNACL_OK;
 }
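Note: hoisting the allocation gives one malloc, one NULL check, and one release point instead of five copies (only one of which checked for NULL) plus a `default` branch that returned without freeing. A compilable sketch of the refactored shape; the template helper and the numeric case labels are illustrative stand-ins for the per-type copy loops and the `kNumberType*` enums:

```cpp
#include <cstdint>
#include <cstdlib>

#define NNACL_OK 0
#define NNACL_ERR 1

// Stand-in for the per-type copy loops; illustrative only.
template <typename T>
static void ConvertShape(const void *src, int *dst, size_t n) {
  const T *data = static_cast<const T *>(src);
  for (size_t i = 0; i < n; ++i) {
    dst[i] = static_cast<int>(data[i]);
  }
}

int CalShapeByTypeSketch(const void *raw, int data_type, size_t shape_size) {
  if (shape_size == 0) {
    return NNACL_ERR;
  }
  int *data_int = (int *)malloc(sizeof(int) * shape_size);
  if (data_int == NULL) {
    return NNACL_ERR;  // checked once, instead of only in the UInt32 case
  }
  int ret = NNACL_OK;
  switch (data_type) {  // 0..4 are placeholders for the kNumberType* values
    case 0: ConvertShape<int8_t>(raw, data_int, shape_size); break;
    case 1: ConvertShape<int32_t>(raw, data_int, shape_size); break;
    case 2: ConvertShape<int64_t>(raw, data_int, shape_size); break;
    case 3: ConvertShape<float>(raw, data_int, shape_size); break;
    case 4: ConvertShape<uint32_t>(raw, data_int, shape_size); break;
    default: ret = NNACL_ERR; break;  // still reaches the single free below
  }
  free(data_int);  // single release point for every branch
  return ret;
}

int main() {
  const int32_t shape[2] = {4, 8};
  return CalShapeByTypeSketch(shape, 1, 2) == NNACL_OK ? 0 : 1;
}
```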
@@ -244,7 +244,7 @@ int TransIndexToPositive(StridedSliceTransferBuffer *transfer_buffer, const int
 }
 void ApplyShrinkMask(StridedSliceTransferBuffer *transfer_buffer, int *output_shape, size_t *output_shape_size) {
-  int old_out_shape[MAX_SHAPE_SIZE];
+  int old_out_shape[MAX_SHAPE_SIZE] = {0};
   size_t old_out_shape_size = 0;
   ShapeSet(old_out_shape, &old_out_shape_size, output_shape, *output_shape_size);
   *output_shape_size = 0;
@@ -317,7 +317,7 @@ int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten
     return NNACL_INFER_INVALID;
   }
-  int in_shape[MAX_SHAPE_SIZE];
+  int in_shape[MAX_SHAPE_SIZE] = {0};
   size_t in_shape_size = 0;
   if (input->shape_size_ > MAX_SHAPE_SIZE) {
     return NNACL_ERR;
@@ -87,12 +87,19 @@ int GetPartialGraphIndex(const void *primitive) {
   int index = -1;
   int schema_version = VersionManager::GetInstance()->GetSchemaVersion();
   if (schema_version == SCHEMA_CUR) {
-    MS_ASSERT(static_cast<const schema::Primitive *>(primitive)->value_as_PartialFusion() != nullptr);
-    index = static_cast<const schema::Primitive *>(primitive)->value_as_PartialFusion()->sub_graph_index();
+    auto partial_fusion = reinterpret_cast<const schema::Primitive *>(primitive)->value_as_PartialFusion();
+    if (partial_fusion == nullptr) {
+      return -1;
+    }
+    index = partial_fusion->sub_graph_index();
   }
 #ifdef ENABLE_V0
   if (schema_version == SCHEMA_V0) {
-    index = static_cast<const schema::v0::Primitive *>(primitive)->value_as_Partial()->subGraphIndex();
+    auto partial = reinterpret_cast<const schema::v0::Primitive *>(primitive)->value_as_Partial();
+    if (partial == nullptr) {
+      return -1;
+    }
+    index = partial->subGraphIndex();
   }
 #endif
   return index;
@@ -117,13 +124,19 @@ int GetWhileBodySubgraphIndex(const void *primitive) {
   int index = -1;
   int schema_version = VersionManager::GetInstance()->GetSchemaVersion();
   if (schema_version == SCHEMA_CUR) {
-    MS_ASSERT(static_cast<const schema::Primitive *>(primitive)->value_as_While() != nullptr);
-    index = reinterpret_cast<const schema::Primitive *>(primitive)->value_as_While()->body_subgraph_index();
+    auto while_value = reinterpret_cast<const schema::Primitive *>(primitive)->value_as_While();
+    if (while_value == nullptr) {
+      return -1;
+    }
+    index = while_value->body_subgraph_index();
   }
 #ifdef ENABLE_V0
   if (schema_version == SCHEMA_V0) {
-    MS_ASSERT(static_cast<const schema::Primitive *>(primitive)->value_as_While() != nullptr);
-    index = reinterpret_cast<const schema::v0::Primitive *>(primitive)->value_as_While()->bodySubgraphIndex();
+    auto while_value = reinterpret_cast<const schema::v0::Primitive *>(primitive)->value_as_While();
+    if (while_value == nullptr) {
+      return -1;
+    }
+    index = while_value->bodySubgraphIndex();
   }
 #endif
   return index;
@@ -134,13 +147,19 @@ int GetWhileCondSubgraphIndex(const void *primitive) {
   int index = -1;
   int schema_version = VersionManager::GetInstance()->GetSchemaVersion();
   if (schema_version == SCHEMA_CUR) {
-    MS_ASSERT(static_cast<const schema::Primitive *>(primitive)->value_as_While() != nullptr);
-    index = reinterpret_cast<const schema::Primitive *>(primitive)->value_as_While()->cond_subgraph_index();
+    auto while_value = reinterpret_cast<const schema::Primitive *>(primitive)->value_as_While();
+    if (while_value == nullptr) {
+      return -1;
+    }
+    index = while_value->cond_subgraph_index();
   }
 #ifdef ENABLE_V0
   if (schema_version == SCHEMA_V0) {
-    MS_ASSERT(static_cast<const schema::Primitive *>(primitive)->value_as_While() != nullptr);
-    index = reinterpret_cast<const schema::v0::Primitive *>(primitive)->value_as_While()->condSubgraphIndex();
+    auto while_value = reinterpret_cast<const schema::v0::Primitive *>(primitive)->value_as_While();
+    if (while_value == nullptr) {
+      return -1;
+    }
+    index = while_value->condSubgraphIndex();
   }
 #endif
   return index;
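Note: all three hunks above replace `MS_ASSERT` with a real check for the same reason: assert-style macros compile to nothing in release builds, so the dereference they were "guarding" still crashes on a malformed model. A sketch with a hypothetical `Primitive` stand-in:

```cpp
#include <cassert>
#include <cstdio>

// Hypothetical stand-in: the pointer member plays the role of the
// value_as_PartialFusion() / value_as_While() lookup result.
struct Primitive {
  const int *sub_graph_index;
};

int GetIndexChecked(const Primitive *p) {
  // assert(p->sub_graph_index != nullptr);  // a no-op under -DNDEBUG
  if (p->sub_graph_index == nullptr) {
    return -1;  // explicit, always-on failure path, as in the patch
  }
  return *p->sub_graph_index;
}

int main() {
  Primitive bad{nullptr};
  printf("%d\n", GetIndexChecked(&bad));  // -1 instead of a crash
  return 0;
}
```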
@@ -43,8 +43,8 @@ int ConvolutionCPUKernel::InitWeightBias() {
   conv_param_->input_channel_ = in_channel;
   conv_param_->output_channel_ = out_channel;
   size_t kernel_plane = filter_tensor->Height() * filter_tensor->Width();
-  int oc_block_num = UP_ROUND(out_channel, OC_BLOCK);
-  int pack_weight_size = oc_block_num * in_channel * kernel_plane;
+  size_t oc_block_num = UP_ROUND(out_channel, OC_BLOCK);
+  size_t pack_weight_size = oc_block_num * in_channel * kernel_plane;
   packed_weight_ = reinterpret_cast<float *>(malloc(pack_weight_size * sizeof(float)));
   if (packed_weight_ == nullptr) {
@@ -163,11 +163,11 @@ int ConvolutionCPUKernel::Run() {
 void ConvolutionCPUKernel::PackWeight() {
   auto filter_tensor = in_tensors_.at(kWeightIndex);
-  int in_channel = filter_tensor->Channel();
-  int out_channel = filter_tensor->Batch();
-  int kernel_plane = filter_tensor->Height() * filter_tensor->Width();
-  int oc_block_num = UP_ROUND(out_channel, OC_BLOCK);
-  int pack_weight_size = oc_block_num * in_channel * kernel_plane;
+  size_t in_channel = filter_tensor->Channel();
+  size_t out_channel = filter_tensor->Batch();
+  size_t kernel_plane = filter_tensor->Height() * filter_tensor->Width();
+  size_t oc_block_num = UP_ROUND(out_channel, OC_BLOCK);
+  size_t pack_weight_size = oc_block_num * in_channel * kernel_plane;
   auto origin_weight = reinterpret_cast<float *>(filter_tensor->data_c());
   memset(packed_weight_, 0, pack_weight_size * sizeof(float));
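Note: the `int` -> `size_t` change in these two hunks is about the size arithmetic itself, not just the `malloc` argument: for large layers the three-way product can exceed `INT_MAX`, and signed overflow is undefined behavior. Made-up numbers that trigger it:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  // Dimensions are invented to force the overflow; real layers can get here.
  int oc_block_num = 1024, in_channel = 2048, kernel_plane = 1024;
  // int pack_weight_size = oc_block_num * in_channel * kernel_plane;  // UB: product is 2^31 > INT_MAX
  size_t pack_weight_size =
      (size_t)oc_block_num * (size_t)in_channel * (size_t)kernel_plane;
  printf("%zu elements, %zu bytes\n", pack_weight_size,
         pack_weight_size * sizeof(float));  // 2147483648 elements
  return 0;
}
```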
@@ -81,7 +81,7 @@ class ConvolutionWinogradCPUKernel : public ConvolutionBaseCPUKernel {
   float *gemm_out_ = nullptr;
   float *col_buffer_ = nullptr;
   float *trans_weight_ = nullptr;
-  TmpBufferAddress tmp_buffer_address_list_[4];
+  TmpBufferAddress tmp_buffer_address_list_[4] = {nullptr};
   InputTransFunc in_func_ = nullptr;
   OutputTransFunc out_func_ = nullptr;
 };
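Note: same idea as the `= {0}` hunks, applied to a C++ member array: the in-class initializer makes all four pointers start as `nullptr` (aggregate initialization value-initializes the unnamed tail). A sketch, assuming `TmpBufferAddress` is a pointer typedef:

```cpp
// Assumed typedef; the real one lives in the Winograd kernel headers.
using TmpBufferAddress = float *;

class WinogradBuffers {
  TmpBufferAddress tmp_buffer_address_list_[4] = {nullptr};  // all four null

 public:
  bool Ready() const {
    for (auto p : tmp_buffer_address_list_) {
      if (p == nullptr) return false;  // safe: reads defined values only
    }
    return true;
  }
};

int main() {
  WinogradBuffers b;
  return b.Ready() ? 1 : 0;  // 0: nothing assigned yet, and no UB in the check
}
```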
@@ -64,34 +64,33 @@ MatmulBaseInt8CPUKernel::~MatmulBaseInt8CPUKernel() {
     free(bias_ptr_);
     bias_ptr_ = nullptr;
   }
+  if (quant_param_ != nullptr) {
+    free(quant_param_);
+    quant_param_ = nullptr;
+  }
 }
 void MatmulBaseInt8CPUKernel::FreeQuantParam() {
-  if (quant_param_->filter_scale_ != nullptr) {
-    free(quant_param_->filter_scale_);
-    quant_param_->filter_scale_ = nullptr;
-  }
-  if (quant_param_->filter_zp_ != nullptr) {
-    free(quant_param_->filter_zp_);
-    quant_param_->filter_zp_ = nullptr;
-  }
-  if (quant_param_->left_shift_ != nullptr) {
-    free(quant_param_->left_shift_);
-    quant_param_->left_shift_ = nullptr;
-  }
-  if (quant_param_->right_shift_ != nullptr) {
-    free(quant_param_->right_shift_);
-    quant_param_->right_shift_ = nullptr;
-  }
-  if (quant_param_->quant_multiplier_ != nullptr) {
-    free(quant_param_->quant_multiplier_);
-    quant_param_->quant_multiplier_ = nullptr;
+  if (quant_param_ != nullptr) {
+    if (quant_param_->filter_scale_ != nullptr) {
+      free(quant_param_->filter_scale_);
+      quant_param_->filter_scale_ = nullptr;
+    }
+    if (quant_param_->filter_zp_ != nullptr) {
+      free(quant_param_->filter_zp_);
+      quant_param_->filter_zp_ = nullptr;
+    }
+    if (quant_param_->left_shift_ != nullptr) {
+      free(quant_param_->left_shift_);
+      quant_param_->left_shift_ = nullptr;
+    }
+    if (quant_param_->right_shift_ != nullptr) {
+      free(quant_param_->right_shift_);
+      quant_param_->right_shift_ = nullptr;
+    }
+    if (quant_param_->quant_multiplier_ != nullptr) {
+      free(quant_param_->quant_multiplier_);
+      quant_param_->quant_multiplier_ = nullptr;
+    }
+    free(quant_param_);
+    quant_param_ = nullptr;
   }
   return;
 }
 int MatmulBaseInt8CPUKernel::MallocQuantParam() {
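Note: the rewrite wraps every member release in a single `quant_param_ != nullptr` guard and nulls the pointer after freeing, so `FreeQuantParam` followed by the destructor's new guarded free is a harmless no-op rather than a double free. A reduced sketch of the pattern (two members instead of five; types are stand-ins):

```cpp
#include <cstdlib>

struct QuantParams {
  float *filter_scale_;
  int *filter_zp_;
};

struct Kernel {
  QuantParams *quant_param_ = nullptr;

  void FreeQuantParam() {
    if (quant_param_ != nullptr) {  // old code dereferenced without this check
      if (quant_param_->filter_scale_ != nullptr) {
        free(quant_param_->filter_scale_);
        quant_param_->filter_scale_ = nullptr;
      }
      if (quant_param_->filter_zp_ != nullptr) {
        free(quant_param_->filter_zp_);
        quant_param_->filter_zp_ = nullptr;
      }
      free(quant_param_);
      quant_param_ = nullptr;  // makes repeated release attempts safe
    }
  }

  ~Kernel() { FreeQuantParam(); }  // simplified stand-in for the destructor
};

int main() {
  Kernel k;
  k.FreeQuantParam();  // safe even though quant_param_ was never allocated
  return 0;            // destructor releases again: still a no-op
}
```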
@@ -62,7 +62,7 @@ void SearchSubGraph::ConvertSubGraphToModel() {
   Model::Node *new_partial_node = new (std::nothrow) Model::Node();
   if (new_partial_node == nullptr) {
     MS_LOG(ERROR) << "New partial node failed!";
-    free(new_sub_graph);
+    delete new_sub_graph;
     return;
   }
   new_partial_node->name_ = "Partial-subgraph-split-" + std::to_string(new_sub_index);
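Note: `new_sub_graph` comes from `new (std::nothrow)`, and memory obtained with `new` must go back through `delete`; releasing it with `free` skips the destructor and mismatches the allocator, which is undefined behavior. A sketch with a hypothetical `SubGraph`:

```cpp
#include <new>
#include <string>

// Hypothetical stand-in with an owning member, so the destructor matters.
struct SubGraph {
  std::string name_;  // its destructor releases heap memory
};

int main() {
  SubGraph *new_sub_graph = new (std::nothrow) SubGraph();
  if (new_sub_graph == nullptr) {
    return 1;
  }
  new_sub_graph->name_ = "subgraph-0";
  // free(new_sub_graph);  // wrong: skips ~SubGraph() and mismatches new/free
  delete new_sub_graph;    // right: runs the destructor, then deallocates
  return 0;
}
```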