@@ -58,7 +58,11 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
       MS_LOG(ERROR) << "new char[] failed";
       return RET_MEMORY_FAILED;
     }
-    std::memcpy(tensor_data, tensor->data.data(), size);
+    auto ret = memcpy_s(tensor_data, size, tensor->data.data(), size);
+    if (EOK != ret) {
+      MS_LOG(ERROR) << "memcpy_s error";
+      return RET_MEMORY_FAILED;
+    }
     param_value->set_tensor_addr(tensor_data);
     param_value->set_tensor_size(size);
     parameter->set_default_param(param_value);
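Note on the pattern above: unlike `std::memcpy`, the bounds-checked `memcpy_s` validates its arguments and reports failure instead of overflowing. A minimal sketch of the same idiom, assuming the secure-C runtime MindSpore links against (the header name and wrapper function here are illustrative, not from the patch):

```cpp
#include "securec.h"  // assumption: secure-C runtime providing memcpy_s/EOK

// Copies src into dst, failing cleanly instead of overflowing: memcpy_s
// returns a nonzero error when dst or src is null, or when src_size
// exceeds dst_size, and EOK (0) on success.
bool SafeCopy(void *dst, size_t dst_size, const void *src, size_t src_size) {
  return memcpy_s(dst, dst_size, src, src_size) == EOK;
}
```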
@@ -154,8 +158,16 @@ int AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNo
       }
       auto tuple_get_item_prim = NewValueNode(tuple_get_item_prim_ptr);
       auto get_item_value = NewValueNode(MakeValue<int>(i));
+      if (tuple_get_item_prim == nullptr || get_item_value == nullptr) {
+        MS_LOG(ERROR) << "NewValueNode is nullptr";
+        return RET_NULL_PTR;
+      }
       std::vector<AnfNodePtr> inputs{tuple_get_item_prim, dst_cnode, get_item_value};
       CNodePtr get_item_cnode = func_graph_->NewCNode(inputs);
+      if (get_item_cnode == nullptr) {
+        MS_LOG(ERROR) << "NewCNode is nullptr";
+        return RET_NULL_PTR;
+      }
       get_item_cnode->set_fullname_with_scope(src_cnode->name + "_getitem_" + std::to_string(i));
       AddNode(out_tensor_id, get_item_cnode);
     }
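This check-after-create discipline recurs throughout the patch. A hypothetical helper showing the shape of the pattern (the helper name is not from the source; MindSpore's `FuncGraphPtr`, `AnfNodePtr`, `CNodePtr`, and `MS_LOG` are assumed to be in scope):

```cpp
#include <vector>

// Hypothetical convenience wrapper: create a CNode and surface allocation
// failure at the call site instead of crashing on a null dereference later.
CNodePtr NewCheckedCNode(const FuncGraphPtr &graph, const std::vector<AnfNodePtr> &inputs) {
  auto cnode = graph->NewCNode(inputs);
  if (cnode == nullptr) {
    MS_LOG(ERROR) << "NewCNode is nullptr";
  }
  return cnode;  // caller maps nullptr to RET_NULL_PTR or false as appropriate
}
```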
@@ -216,6 +228,10 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() {
     make_tuple_inputs.emplace_back(cNode);
   }
   auto make_tuple_cnode = func_graph_->NewCNode(make_tuple_inputs);
+  if (make_tuple_cnode == nullptr) {
+    MS_LOG(ERROR) << "NewCNode is nullptr";
+    return RET_NULL_PTR;
+  }
   make_tuple_cnode->set_fullname_with_scope("return tuple");
   std::vector<AnfNodePtr> op_inputs;
@@ -246,6 +262,10 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() {
   }
   op_inputs.emplace_back(cnode);
   auto return_cnode = func_graph_->NewCNode(op_inputs);
+  if (return_cnode == nullptr) {
+    MS_LOG(ERROR) << "NewCNode is nullptr";
+    return RET_NULL_PTR;
+  }
   return_cnode->set_fullname_with_scope("return");
   func_graph_->set_return(return_cnode);
 }
@@ -27,7 +27,7 @@
 namespace mindspore::lite {
 class AnfImporterFromMetaGraphT : public AnfImporter {
  public:
-  explicit AnfImporterFromMetaGraphT(schema::MetaGraphT *meta_graph, FuncGraphPtr func_graph)
+  AnfImporterFromMetaGraphT(schema::MetaGraphT *meta_graph, FuncGraphPtr func_graph)
       : meta_graph_(meta_graph), func_graph_(std::move(func_graph)) {}
   ~AnfImporterFromMetaGraphT() override = default;
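Dropping `explicit` here is more than style: since C++11, `explicit` on a multi-parameter constructor still forbids copy-list-initialization. With it removed, both brace forms below compile (illustrative type, not from the patch). For the fusion-pass constructors later in this diff, whose parameters are all defaulted, removing `explicit` additionally re-enables implicit conversion from a single `bool`, which is worth weighing.

```cpp
struct Importer {
  Importer(int *graph, double scale) {}  // without explicit
};

Importer a{nullptr, 1.0};     // direct-list-initialization: OK either way
Importer b = {nullptr, 1.0};  // copy-list-initialization: ill-formed if the
                              // constructor were declared explicit
```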
@@ -43,7 +43,6 @@ using int64 = int64_t;
 using uint64 = uint64_t;
 namespace mindspore::lite {
 static constexpr char kConstantValueNode[] = "Constant";
-
 enum ParseForm : int {
@@ -212,7 +211,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
   node->set_name(value_proto.name());
   const auto &type_proto = value_proto.type();
   if (!type_proto.has_tensor_type()) {
-    MS_LOG(ERROR) << "onnx TypeProto has no tesor_type! ";
+    MS_LOG(ERROR) << "onnx TypeProto has no tensor_type! ";
     return RET_PARAM_INVALID;
   }
   const onnx::TypeProto_Tensor &tensor_typeproto = type_proto.tensor_type();
@@ -248,6 +247,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
   std::string initial_data = initialize_proto.raw_data();
   auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->MutableData());
   if (tensor_data_buf == nullptr) {
+    delete tensor_info;
     return RET_MEMORY_FAILED;
   }
   tensor_info->set_data(nullptr);
@@ -261,6 +261,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
   ParamValueLitePtr param_value = std::make_shared<ParamValueLite>();
   if (param_value == nullptr) {
+    delete tensor_info;
     return RET_NULL_PTR;
   }
   param_value->set_tensor_addr(tensor_data_buf);
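Both early returns above now free `tensor_info`, which these paths previously leaked. A sketch of how RAII would make such spot fixes unnecessary, assuming `tensor::Tensor` can be held by a smart pointer at this point (names and the success-path hand-off are illustrative):

```cpp
#include <memory>

// With unique_ptr, every early return releases the tensor automatically;
// ownership leaves the smart pointer only on the success path.
auto tensor_info = std::make_unique<tensor::Tensor>();
if (tensor_info->MutableData() == nullptr) {
  return RET_MEMORY_FAILED;  // no manual delete needed
}
// ... on success, transfer ownership to whatever consumes the raw pointer:
// param_value->SetTensorData(tensor_info.release(), size);
```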
@@ -367,22 +368,38 @@ bool AnfImporterFromProtobuf::ObtainCNodeAttrInTensorForm(const PrimitivePtr &pr
       std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[attr_tensor_type], shape_vector);
     auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c());
     ret = memcpy_s(tensor_data_buf, tensor_info->Size(), tensor_buf.data(), tensor_buf.size());
+    if (EOK != ret) {
+      MS_LOG(ERROR) << "memcpy_s error";
+      return false;
+    }
     prim->set_attr(attr_name, MakeValue(tensor_info));
   } else {
     if (attr_tensor_type == onnx::TensorProto_DataType_DOUBLE) {
       size_t data_size = sizeof(double);
       double attr_value = 0.0;
       ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size());
+      if (EOK != ret) {
+        MS_LOG(ERROR) << "memcpy_s error";
+        return false;
+      }
       prim->set_attr(attr_name, MakeValue<double>(attr_value));
     } else if (attr_tensor_type == onnx::TensorProto_DataType_INT64) {
       size_t data_size = sizeof(int64_t);
       int64_t attr_value = 0;
       ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size());
+      if (EOK != ret) {
+        MS_LOG(ERROR) << "memcpy_s error";
+        return false;
+      }
       prim->set_attr(attr_name, MakeValue<int64_t>(attr_value));
     } else if (attr_tensor_type == onnx::TensorProto_DataType_BOOL) {
       size_t data_size = sizeof(bool);
       bool attr_value = false;
       ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size());
+      if (EOK != ret) {
+        MS_LOG(ERROR) << "memcpy_s error";
+        return false;
+      }
       prim->set_attr(attr_name, MakeValue<bool>(attr_value));
     }
   }
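These checks are not redundant: in the scalar branches, `tensor_buf.size()` comes from the serialized model, so `memcpy_s` reports an error rather than overflowing the fixed-size target whenever a malformed attribute carries more bytes than the destination holds. Illustrative snippet (same assumed secure-C runtime as above):

```cpp
#include <cstdint>
#include <vector>
#include "securec.h"  // assumption, as in the earlier sketch

// An 8-byte destination rejects a 9-byte source instead of overflowing,
// so the EOK checks above also catch malformed attribute payloads.
double attr_value = 0.0;
std::vector<uint8_t> tensor_buf(9);  // one byte larger than sizeof(double)
int ret = memcpy_s(&attr_value, sizeof(double), tensor_buf.data(), tensor_buf.size());
// ret != EOK here; treat attr_value as invalid on any failure
```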
@@ -399,7 +416,7 @@ bool AnfImporterFromProtobuf::GetAttrValueForCNode(const PrimitivePtr &prim, con
     return false;
   }
   const std::string &ref_attr_name = attr_proto.ref_attr_name();
-  string type;
+  string type = "";
   std::size_t pos(0);
   if ((pos = ref_attr_name.find("scalar:")) != std::string::npos) {
     type = ref_attr_name.substr(pos, string("scalar:").length() - 1);
@@ -503,7 +520,7 @@ bool AnfImporterFromProtobuf::GetAttrValueForValueNode(const std::string &value_
     return false;
   }
   const std::string &ref_attr_name = attr_proto.ref_attr_name();
-  string type;
+  string type = "";
   std::size_t pos(0);
   if ((pos = ref_attr_name.find("scalar:")) != std::string::npos) {
     type = ref_attr_name.substr(pos, string("scalar:").length() - 1);
@@ -682,9 +699,17 @@ bool AnfImporterFromProtobuf::BuildReturnForFuncGraph(const FuncGraphPtr &output
     const onnx::ValueInfoProto &output_node = importProto.output(out_size);
     const std::string &out_tuple = output_node.name();
     inputs.push_back(anfnode_build_map_[out_tuple]);
+    if (anfnode_build_map_[out_tuple] == nullptr) {
+      MS_LOG(ERROR) << "AnfNode is nullptr";
+      return false;
+    }
     elem.push_back(anfnode_build_map_[out_tuple]->abstract());
   }
   auto maketuple_ptr = outputFuncGraph->NewCNode(inputs);
+  if (maketuple_ptr == nullptr) {
+    MS_LOG(ERROR) << "maketuple_ptr is nullptr";
+    return false;
+  }
   maketuple_ptr->set_abstract(std::make_shared<abstract::AbstractTuple>(elem));
   inputs.clear();
   auto primReturn = std::make_unique<schema::PrimitiveT>();
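One caveat on the first added check: `anfnode_build_map_[out_tuple]` uses `operator[]`, which default-constructs a null entry when the key is absent, so the check does catch missing outputs, but only after the null pointer has already been pushed into `inputs`. Looking the node up once, before the push, would be tighter. A sketch, assuming the map is a standard associative container keyed by `std::string`:

```cpp
// Look up first, fail fast, then push: avoids inserting a null entry into
// both the map (via operator[]) and the inputs vector.
auto it = anfnode_build_map_.find(out_tuple);
if (it == anfnode_build_map_.end() || it->second == nullptr) {
  MS_LOG(ERROR) << "AnfNode is nullptr";
  return false;
}
inputs.push_back(it->second);
elem.push_back(it->second->abstract());
```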
@@ -857,6 +882,10 @@ int AnfImporterFromProtobuf::Import(const schema::QuantType &quantType) {
     MS_LOG(ERROR) << "Parse configuration info for pb file failed!";
     return status;
   }
+  if (onnx_model_ == nullptr) {
+    MS_LOG(ERROR) << "onnx_model_ is nullptr";
+    return RET_NULL_PTR;
+  }
   const onnx::GraphProto &graphBuild = onnx_model_->graph();
   status = BuildFuncGraph(dstGraph, graphBuild, quantType);
   if (status != RET_OK) {
@@ -871,6 +900,11 @@ int AnfImporterFromProtobuf::Import(const schema::QuantType &quantType) {
 onnx::ModelProto *AnfImporterFromProtobuf::ReadOnnxFromBinary(const std::string &model_path) {
   auto onnx_model = new (std::nothrow) onnx::ModelProto;
+  if (onnx_model == nullptr) {
+    MS_LOG(ERROR) << "New onnx ModelProto failed!";
+    ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_NULL_PTR);
+    return nullptr;
+  }
   if (RET_OK != ValidateFileStr(model_path, ".mindir")) {
     MS_LOG(ERROR) << "INPUT ILLEGAL: modelFile must be *.mindir";
     ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_INPUT_PARAM_INVALID);
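The null check added above is only meaningful because the allocation already uses `new (std::nothrow)`; plain `new` reports failure by throwing `std::bad_alloc`, which makes a subsequent null test dead code. Illustrative:

```cpp
#include <new>

int *buf = new (std::nothrow) int[1024];  // returns nullptr on failure
if (buf == nullptr) {
  // handle the failure locally instead of unwinding through an exception
}
delete[] buf;
```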
@@ -31,7 +31,7 @@
 namespace mindspore::lite {
 class AnfImporterFromProtobuf : public AnfImporter {
  public:
-  explicit AnfImporterFromProtobuf(onnx::ModelProto *onnx_model, FuncGraphPtr func_graph)
+  AnfImporterFromProtobuf(onnx::ModelProto *onnx_model, FuncGraphPtr func_graph)
       : onnx_model_(onnx_model), func_graph_(std::move(func_graph)) {}
   ~AnfImporterFromProtobuf() override = default;
@@ -203,7 +203,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
   auto output_nums = GetOutputTensorNum(input_cnode);
   std::vector<Tensor *> output_tensors;
   for (size_t j = 0; j < output_nums; j++) {
-    output_tensors.push_back(new Tensor());
+    output_tensors.push_back(new (std::nothrow) Tensor());
   }
   auto lite_primitive = GetValueNode<std::shared_ptr<PrimitiveC>>(input_cnode->input(0));
   if (lite_primitive == nullptr) {
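Switching to the nothrow form only pays off if the result is actually tested; as written, a failed allocation would push a null `Tensor*` that later code dereferences. A sketch of the follow-up check this change implies (hypothetical cleanup, not in the patch):

```cpp
auto *tensor = new (std::nothrow) Tensor();
if (tensor == nullptr) {
  // free the tensors already collected before bailing out
  for (auto *t : output_tensors) delete t;
  return nullptr;
}
output_tensors.push_back(tensor);
```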
@@ -32,7 +32,6 @@ const BaseRef ConvActivationFusion::DefinePattern() const {
   auto prim = new schema::PrimitiveT();
   prim->value.type = primitive_type;
   auto prim_value = std::make_shared<lite::PrimitiveC>(prim);
-
   return VectorRef({prim_value, conv_var});
 }
@@ -25,9 +25,9 @@ namespace mindspore {
 namespace opt {
 class ConvActivationFusion : public PatternProcessPass {
  public:
-  explicit ConvActivationFusion(bool multigraph = true, const std::string &name = "conv_activation_fusion",
-                                schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
-                                schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
+  ConvActivationFusion(bool multigraph = true, const std::string &name = "conv_activation_fusion",
+                       schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
+                       schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
       : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
   ~ConvActivationFusion() override = default;
   const BaseRef DefinePattern() const override;
@@ -57,6 +57,11 @@ void CalTransale(const AnfNodePtr &bn_scale_node, const AnfNodePtr &bn_var_node,
   for (int32_t i = 0; i < kernel_num; i++) {
     float tmp = trans_scale[i] + eps;
     tmp = pow(tmp, POW_NUM);
+    if (tmp <= 0.0f) {
+      MS_LOG(ERROR) << "divisor cannot be 0";
+      lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_ERROR);
+      return;
+    }
     trans_scale[i] = 1 / tmp;
   }
   if (bn_scale_node != nullptr) {
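For context, this loop is the batch-norm folding factor: assuming `POW_NUM` is 0.5 and `trans_scale` has been pre-loaded with the per-channel variance, each element becomes 1/sqrt(var + eps), and the new guard rejects a non-positive denominator before the division. A standalone sketch under those assumptions:

```cpp
#include <cmath>

// Illustrative: compute the per-channel fold factor 1/sqrt(var + eps).
void FoldScale(float *trans_scale, int kernel_num, float eps) {
  for (int i = 0; i < kernel_num; i++) {
    float denom = std::sqrt(trans_scale[i] + eps);  // pow(x, 0.5)
    if (denom <= 0.0f) {
      return;  // mirrors the patch: never divide by zero
    }
    trans_scale[i] = 1.0f / denom;
  }
}
```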
@@ -42,7 +42,6 @@ const BaseRef ConvScaleFusion::DefinePattern() const {
   auto bn_var = std::make_shared<CondVar>(IsScaleNode);
   auto weight_var = std::make_shared<CondVar>(IsParamNode);
   auto bias_var = std::make_shared<SeqVar>();
-
   return VectorRef({bn_var, conv_var, weight_var, bias_var});
 }
 const void ConvScaleFusion::InitTransParam(const CNodePtr &scale_node, int kernel_num, float *trans_scale,
@@ -86,14 +86,12 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   auto trans_scale = new (std::nothrow) float[kernel_nums];
   if (trans_scale == nullptr) {
     MS_LOG(ERROR) << "tensor_data is nullptr";
-    delete[] trans_scale;
     return nullptr;
   }
   auto trans_bias = new (std::nothrow) float[kernel_nums];
   if (trans_bias == nullptr) {
     MS_LOG(ERROR) << "tensor_data is nullptr";
     delete[] trans_scale;
-    delete[] trans_bias;
     return nullptr;
   }
   GenTransParam(transform_node, kernel_nums, trans_scale, trans_bias);
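Both removals are safe because `delete[]` on a null pointer is a well-defined no-op in C++: freeing a pointer inside its own null-check branch never did anything, while the surviving `delete[] trans_scale` is the one that matters, since it releases the buffer that did allocate successfully. In short:

```cpp
float *p = new (std::nothrow) float[16];
if (p == nullptr) {
  delete[] p;  // legal but pointless: p is null here (what the patch removes)
  // return ...;
}
delete[] p;  // the meaningful free lives on the path where p is non-null
```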
@@ -179,11 +177,10 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
   if (kernel_num <= 0) {
     MS_LOG(ERROR) << "kernel num less than 0";
     lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_INVALID_OP_ATTR);
+    return;
   }
-
   auto kernel_size = weight_tensor->tensor_shape_size() / kernel_num;
   CalNewWeightTensor(weight_data, kernel_num, kernel_size, trans_scale);
-
   float *bias_data = nullptr;
   // conv has bias,bias_flag true
   bool bias_flag = false;
@@ -196,7 +193,6 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
     bias_data = new (std::nothrow) float[kernel_num];
     if (bias_data == nullptr) {
       MS_LOG(ERROR) << "tensor_data is nullptr";
-      delete[] bias_data;
       return;
     }
   }
@@ -211,6 +207,10 @@ const void ConvTransformFusion::CalNewWeightTensor(float *weight_data, int kerne
                                                    const float *trans_scale) const {
   MS_ASSERT(weight_data != nullptr);
   auto tmp_weight_data = new (std::nothrow) float[kernel_num * kernel_size];
+  if (tmp_weight_data == nullptr) {
+    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_MEMORY_FAILED);
+    return;
+  }
   MS_ASSERT(new_weight_data != nullptr);
   auto data_size = kernel_num * kernel_size * sizeof(float);
   if (0 != memset_s(tmp_weight_data, data_size, 0, data_size)) {
@@ -38,7 +38,6 @@ const BaseRef ConvTupleActivationFusion::DefinePattern() const {
   auto act_prim = new schema::PrimitiveT();
   act_prim->value.type = primitive_type;
   auto act_value = std::make_shared<lite::PrimitiveC>(act_prim);
-
   return VectorRef({act_value, tuple_get_item});
 }
@@ -25,9 +25,9 @@ namespace mindspore {
 namespace opt {
 class ConvTupleActivationFusion : public PatternProcessPass {
  public:
-  explicit ConvTupleActivationFusion(bool multigraph = true, const std::string &name = "conv_tuple_activation_fusion",
-                                     schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
-                                     schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
+  ConvTupleActivationFusion(bool multigraph = true, const std::string &name = "conv_tuple_activation_fusion",
+                            schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
+                            schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
       : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
   ~ConvTupleActivationFusion() override = default;
   const BaseRef DefinePattern() const override;
@@ -48,7 +48,6 @@ class LayerNormFusion : public PatternProcessPass {
   VarPtr beta_;
   VarPtr epsilon_;
 };
-
 }  // namespace opt
 }  // namespace mindspore
@@ -28,14 +28,13 @@ constexpr size_t kActivationInputsLength = 2;
 }
 const BaseRef PoolingActivationFusion::DefinePattern() const {
   auto pooling_var = std::make_shared<CondVar>(IsPoolingNode);
-  auto prim = new schema::PrimitiveT();
+  auto prim = new (std::nothrow) schema::PrimitiveT();
   if (prim == nullptr) {
     MS_LOG(ERROR) << "new primitiveT failed";
     return nullptr;
   }
   prim->value.type = primitive_type;
   auto prim_value = std::make_shared<lite::PrimitiveC>(prim);
-
   return VectorRef({prim_value, pooling_var});
 }
@@ -43,7 +42,6 @@ const AnfNodePtr PoolingActivationFusion::Process(const FuncGraphPtr &func_graph
                                                   const EquivPtr &) const {
   MS_LOG(DEBUG) << "pooling activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
   CheckIfFuncGraphIsNull(func_graph);
-
   CheckIfAnfNodeIsNull(node);
   auto act_node = node->cast<CNodePtr>();
   CheckIfCNodeIsNull(act_node);
@@ -25,9 +25,9 @@ namespace mindspore {
 namespace opt {
 class PoolingActivationFusion : public PatternProcessPass {
  public:
-  explicit PoolingActivationFusion(bool multigraph = true, const std::string &name = "pooling_activation_fusion",
-                                   schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
-                                   schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
+  PoolingActivationFusion(bool multigraph = true, const std::string &name = "pooling_activation_fusion",
+                          schema::PrimitiveType primitive = schema::PrimitiveType_LeakyReLU,
+                          schema::ActivationType activation = schema::ActivationType_LEAKY_RELU)
       : PatternProcessPass(name, multigraph), primitive_type(primitive), activation_type(activation) {}
   ~PoolingActivationFusion() override = default;
   const BaseRef DefinePattern() const override;
@@ -75,7 +75,7 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) {
   auto primitive = std::make_unique<schema::PrimitiveT>();
   MS_ASSERT(primitive != nullptr);
   primitive->value.type = schema::PrimitiveType_Activation;
-  auto prim2 = new schema::ActivationT;
+  auto prim2 = new (std::nothrow) schema::ActivationT;
   MS_ASSERT(prim2 != nullptr);
   if (min == 0 && max == 6) {
     prim2->type = schema::ActivationType_RELU6;
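A final observation on this last hunk: `MS_ASSERT` typically compiles away in release builds, so pairing `new (std::nothrow)` with an assert still leaves a possible null dereference at `prim2->type`. An explicit check, in the same style the rest of this patch adds, would be more consistent (hypothetical follow-up, not in the patch):

```cpp
auto prim2 = new (std::nothrow) schema::ActivationT;
if (prim2 == nullptr) {
  MS_LOG(ERROR) << "new ActivationT failed";
  return false;
}
```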