@@ -551,8 +551,8 @@ STATUS WeightQuantizer::DoQuantSearch(const FuncGraphPtr &func_graph) {
       // copy origin data in case to recover
       auto *raw_data = static_cast<float *>(param_value->tensor_addr());
       auto elem_count = param_value->tensor_shape_size();
-      auto origin_data = malloc(sizeof(float) * elem_count);
-      auto ret = memcpy_s(origin_data, sizeof(float) * elem_count, raw_data, param_value->tensor_size());
+      std::unique_ptr<float[]> origin_data(new (std::nothrow) float[elem_count]);
+      auto ret = memcpy_s(origin_data.get(), sizeof(float) * elem_count, raw_data, param_value->tensor_size());
       if (ret != EOK) {
         MS_LOG(ERROR) << "memcpy fail: "
                       << " dst size: " << sizeof(float) * elem_count << " src size: " << param_value->tensor_size();
@@ -617,7 +617,7 @@ STATUS WeightQuantizer::DoQuantSearch(const FuncGraphPtr &func_graph) {
         MS_LOG(DEBUG) << "op: " << op_name << " intermediate bit: " << bit_num_t << " mean_error: " << mean_error
                       << " [recover]";
         // recover
-        status = UpdateTensorDataAndSize(param_value, origin_data, sizeof(float) * elem_count);
+        status = UpdateTensorDataAndSize(param_value, origin_data.get(), sizeof(float) * elem_count);
         if (status != RET_OK) {
           MS_LOG(ERROR) << "UpdateTensorDataAndSize fail";
           return RET_ERROR;
@@ -627,9 +627,8 @@ STATUS WeightQuantizer::DoQuantSearch(const FuncGraphPtr &func_graph) {
           opname_bit_[op_name] = bit_num_t;
         }
       }  // end bit loop
-      free(origin_data);
-    }  // if: conv and matmul
-  }    // end loop: all cnode
+    }  // if: conv and matmul
+  }    // end loop: all cnode
   return status;
 }
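The pattern in the three hunks above: the recovery buffer is now owned by a std::unique_ptr<float[]> instead of a raw malloc'd pointer, so it is released on every exit path, including the early returns on memcpy_s and UpdateTensorDataAndSize failure, and the trailing free(origin_data) becomes unnecessary. A minimal sketch of the same shape, assuming memcpy_s/EOK from the secure C runtime are available as in the original sources; TransformWithRecovery and its parameters are hypothetical names, not part of the change:

#include <cstddef>
#include <memory>
#include <new>
#include "securec.h"  // memcpy_s / EOK, assumed available as in the original code

// Keep a recoverable copy of a float buffer while attempting a destructive
// transformation; the copy is freed on every return without free()/delete[].
bool TransformWithRecovery(float *data, std::size_t elem_count) {
  std::unique_ptr<float[]> backup(new (std::nothrow) float[elem_count]);
  if (backup == nullptr) {
    return false;  // allocation failed, nothing to clean up
  }
  if (memcpy_s(backup.get(), sizeof(float) * elem_count, data, sizeof(float) * elem_count) != EOK) {
    return false;  // early return: backup released automatically
  }
  // ... destructive work on `data`; on failure, restore from backup.get() ...
  return true;     // normal return: backup released here as well
}

new (std::nothrow) keeps the original allocation-failure behavior (a null result to check rather than a std::bad_alloc exception), which is why the null check stays meaningful.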
@@ -246,7 +246,8 @@ STATUS TfLstmCellFusion::PopulateBiasNode(const EquivPtr &body_equiv, const Para
   default_param->set_tensor_shape(shape);
   default_param->set_tensor_type(kNumberTypeFloat32);
   default_param->set_format(schema::Format_NHWC);
-  auto tensor_data = new (std::nothrow) float[hidden_size * 8];
+  std::unique_ptr<float[]> tensor_data(new (std::nothrow) float[hidden_size * 8]);
   auto forget_bias_node = utils::cast<AnfNodePtr>((*body_equiv)[forget_bias_]);
   if (forget_bias_node == nullptr) {
@@ -271,13 +272,12 @@ STATUS TfLstmCellFusion::PopulateBiasNode(const EquivPtr &body_equiv, const Para
       }
     }
   }
-  default_param->SetTensorData(tensor_data, hidden_size * 8 * 4);
+  default_param->SetTensorData(tensor_data.release(), hidden_size * 8 * 4);
   new_bias->set_default_param(default_param);
   std::vector<int64_t> shape_vector_i(shape.begin(), shape.end());
   auto abstract_tensor_i = std::make_shared<abstract::AbstractTensor>(kFloat32, shape_vector_i);
   if (abstract_tensor_i == nullptr) {
     MS_LOG(ERROR) << "abstract_tensor is nullptr";
-    delete[] tensor_data;
     return RET_ERROR;
   }
   new_bias->set_abstract(abstract_tensor_i);
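Here ownership of the buffer ultimately passes to the tensor through SetTensorData, so the unique_ptr is release()d exactly at that handoff and the manual delete[] in the error branch is dropped; the IfPass hunks below apply the same release-at-handoff idea when lite::Partial takes over the schema::PrimitiveT. A minimal sketch of the pattern, where Sink, SetData, and FillSink are hypothetical stand-ins for any API that assumes ownership of a raw pointer:

#include <cstddef>
#include <memory>
#include <new>

// Hypothetical sink that takes ownership of a raw buffer and frees it itself
// (stands in for SetTensorData / the lite::Partial constructor).
struct Sink {
  float *data = nullptr;
  std::size_t size = 0;
  void SetData(float *d, std::size_t s) { data = d; size = s; }
  ~Sink() { delete[] data; }
};

bool FillSink(Sink *sink, std::size_t n) {
  std::unique_ptr<float[]> buf(new (std::nothrow) float[n]);
  if (buf == nullptr) {
    return false;                   // early return: nothing leaks
  }
  for (std::size_t i = 0; i < n; ++i) {
    buf[i] = 0.0f;                  // ... populate the buffer ...
  }
  sink->SetData(buf.release(), n);  // ownership moves to the sink here
  return true;                      // after release(), no double free
}

The rule of thumb the diff follows: keep the unique_ptr alive across every path that can return early, and call release() only on the single line where another owner takes over.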
@@ -31,7 +31,7 @@
 namespace mindspore::opt {
 ValueNodePtr IfPass::GetSwitchAnfPrim() {
-  auto switch_primitiveT = new (std::nothrow) schema::PrimitiveT;
+  std::unique_ptr<schema::PrimitiveT> switch_primitiveT(new (std::nothrow) schema::PrimitiveT);
   if (switch_primitiveT == nullptr) {
     MS_LOG(ERROR) << "new switch_primitiveT failed";
     return nullptr;
@@ -43,7 +43,7 @@ ValueNodePtr IfPass::GetSwitchAnfPrim() {
     return nullptr;
   }
-  auto partial_prim = std::make_shared<lite::Partial>(switch_primitiveT);
+  auto partial_prim = std::make_shared<lite::Partial>(switch_primitiveT.release());
   ValueNodePtr partial_anf_prim = NewValueNode(partial_prim);
   return partial_anf_prim;
 }