From d05857a16c0d482597e9c4a43a40d44da716f28d Mon Sep 17 00:00:00 2001
From: guohongzilong <2713219276@qq.com>
Date: Fri, 20 Nov 2020 14:27:01 +0800
Subject: [PATCH] add ptr null check

---
 .../lite/tools/optimizer/common/gllo_utils.cc      | 14 +++++++++++++-
 .../tools/optimizer/common/pass_manager_extends.cc |  3 +++
 .../optimizer/fusion/constant_folding_fusion.cc    |  2 ++
 .../optimizer/fusion/conv_activation_fusion.cc     |  2 ++
 .../tools/optimizer/fusion/conv_biasadd_fusion.cc  |  5 +++++
 .../lite/tools/optimizer/fusion/conv_bn_fusion.cc  |  2 ++
 .../tools/optimizer/fusion/conv_scale_fusion.cc    |  2 ++
 .../optimizer/fusion/conv_transform_fusion.cc      |  3 +++
 .../fusion/conv_tuple_activation_fusion.cc         |  2 ++
 .../tools/optimizer/fusion/layer_norm_fusion.cc    |  2 ++
 .../optimizer/fusion/pooling_activation_fusion.cc  |  2 ++
 .../optimizer/fusion/quant_dtype_cast_fusion.cc    |  2 ++
 12 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/mindspore/lite/tools/optimizer/common/gllo_utils.cc b/mindspore/lite/tools/optimizer/common/gllo_utils.cc
index e3af1de4c0..f19679dd60 100644
--- a/mindspore/lite/tools/optimizer/common/gllo_utils.cc
+++ b/mindspore/lite/tools/optimizer/common/gllo_utils.cc
@@ -112,6 +112,10 @@ VarNodePtr CreateVarNodeWithSexp(const BaseRef &sexp, const BaseRef &graph) {
 
 AnfNodePtr HandleSexpVector(const BaseRef &sexp, const BaseRef &graph, PrimitiveVarMap *primitive_vars,
                             bool multigraph) {
+  if (primitive_vars == nullptr) {
+    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
+    return nullptr;
+  }
   MS_LOG(DEBUG) << "HandleSexpVector sexp: " + sexp.ToString() + ", graph " + graph.ToString();
   std::vector<AnfNodePtr> input_nodes;
   const auto &tuple = utils::cast<VectorRef>(sexp);
@@ -499,6 +503,10 @@ bool IsQuantNode(const BaseRef &n) {
 }
 
 bool CheckIsAllInputsParam(const AnfNodePtr &node) {
+  if (node == nullptr) {
+    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
+    return false;
+  }
   if (utils::isa<CNode>(node)) {
     auto cnode = node->cast<CNodePtr>();
     for (size_t i = 1; i < cnode->inputs().size(); i++) {
@@ -537,6 +545,10 @@ size_t GetOutputTensorNum(const AnfNodePtr &node) {
 }
 
 bool IsMultiOutputTensors(const FuncGraphPtr &graph, const AnfNodePtr &node) {
+  if (node == nullptr || graph == nullptr) {
+    lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
+    return false;
+  }
   auto output_node_list = GetRealNodeUsedList(graph, node);
   if (output_node_list->size() != 1) {
     MS_LOG(DEBUG) << "fusion node has multi output nodes";
@@ -548,7 +560,7 @@ bool IsMultiOutputTensors(const FuncGraphPtr &graph, const AnfNodePtr &node) {
 std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedList(const FuncGraphPtr &graph,
                                                                              const AnfNodePtr &node) {
   auto output_node_list = std::make_shared<std::vector<std::pair<AnfNodePtr, int>>>();
-  if (graph == nullptr) {
+  if (graph == nullptr || node == nullptr) {
     lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
     return nullptr;
   }
diff --git a/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc b/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc
index c437619042..0edabe31eb 100644
--- a/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc
+++ b/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc
@@ -75,6 +75,9 @@ bool PassManager::Run(const FuncGraphPtr &func_graph, const std::vector<PassPtr> &passes) const {
 }
 
 bool PassManager::Run(const FuncGraphPtr &func_graph) const {
+  if (func_graph == nullptr) {
+    return false;
+  }
   bool changed = false;
   // run all passes
   bool change = true;
diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
index 20f6c53ae3..21d7302040 100644
--- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
@@ -76,6 +76,8 @@ std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &CNode) {
 }
 
 ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *tensor) {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(tensor != nullptr);
   auto parameter = func_graph->add_parameter();
   std::vector<int> shape(tensor->shape());
   std::vector<int64_t> shape_vector;
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc
index cc1b4e6267..2b7c6a4995 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc
@@ -38,6 +38,8 @@ const BaseRef ConvActivationFusion::DefinePattern() const {
 
 const AnfNodePtr ConvActivationFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "conv activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc
index 0d5c5bc948..2500658776 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc
@@ -81,6 +81,9 @@ int Get_Kenrnel_nums(const CNodePtr &conv_node) {
   }
 }
 int GenConvNewBias(const FuncGraphPtr &func_graph, const CNodePtr &conv_node, const CNodePtr &bias_node) {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(conv_node != nullptr);
+  MS_ASSERT(bias_node != nullptr);
   AnfNodePtr conv_bias_node = nullptr;
   AnfNodePtr conv_weight_node = nullptr;
   if (conv_node->inputs().size() == kConvNoBiasLen) {
@@ -158,6 +161,8 @@ const BaseRef ConvBiasaddFusion::DefinePattern() const {
 
 const AnfNodePtr ConvBiasaddFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                             const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "Enter pass process";
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     return nullptr;
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc
index 6b111d0fb1..e9e14e9e56 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc
@@ -111,6 +111,8 @@ const BaseRef ConvBatchNormFusion::DefinePattern() const {
 const void ConvBatchNormFusion::InitTransParam(const CNodePtr &bn_node, int kernel_num, float *trans_scale,
                                                float *trans_bias) const {
   MS_ASSERT(bn_node != nullptr);
+  MS_ASSERT(trans_bias != nullptr);
+  MS_ASSERT(trans_scale != nullptr);
   AnfNodePtr bn_mean_node = nullptr;
   AnfNodePtr bn_variance_node = nullptr;
   AnfNodePtr bn_scale_node = nullptr;
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc
index bb4bd9bd53..3ebb96210b 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc
@@ -48,6 +48,8 @@ const BaseRef ConvScaleFusion::DefinePattern() const {
 const void ConvScaleFusion::InitTransParam(const CNodePtr &scale_node, int kernel_num, float *trans_scale,
                                            float *trans_bias) const {
   MS_ASSERT(scale_node != nullptr);
+  MS_ASSERT(trans_bias != nullptr);
+  MS_ASSERT(trans_scale != nullptr);
   AnfNodePtr scale_weight_node;
   AnfNodePtr scale_bias_node;
   if (scale_node->inputs().size() == kScaleNoBiasLen) {
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
index 208edf15b0..a5ad384f08 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
@@ -210,6 +210,7 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
 const void ConvTransformFusion::CalNewWeightTensor(float *weight_data, int kernel_num, int kernel_size,
                                                    const float *trans_scale) const {
   MS_ASSERT(weight_data != nullptr);
+  MS_ASSERT(trans_scale != nullptr);
   auto tmp_weight_data = new (std::nothrow) float[kernel_num * kernel_size];
   MS_ASSERT(new_weight_data != nullptr);
   auto data_size = kernel_num * kernel_size * sizeof(float);
@@ -239,6 +240,8 @@ const void ConvTransformFusion::CalNewWeightTensor(float *weight_data, int kerne
 const void ConvTransformFusion::CalNewBiasTensor(float *bias_data, int kernel_num, bool bias_flag,
                                                  const float *trans_scale, const float *trans_bias) {
   MS_ASSERT(bias_data != nullptr);
+  MS_ASSERT(trans_bias != nullptr);
+  MS_ASSERT(trans_scale != nullptr);
   if (bias_flag) {
     auto tmp_bias_data = new (std::nothrow) float[kernel_num];
     if (tmp_bias_data == nullptr) {
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
index d8ed8a6622..b9ed47c41a 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc
@@ -44,6 +44,8 @@ const BaseRef ConvTupleActivationFusion::DefinePattern() const {
 
 const AnfNodePtr ConvTupleActivationFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                     const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "conv tuple activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     return nullptr;
diff --git a/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.cc b/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.cc
index 53e3faba1f..061c3e57ad 100644
--- a/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.cc
@@ -137,6 +137,8 @@ CNodePtr LayerNormFusion::CreateLayerNormNode(const FuncGraphPtr &func_graph, co
 
 const AnfNodePtr LayerNormFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                           const EquivPtr &equiv) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "layer_norm pass";
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR);
diff --git a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc
index acd3579ea2..843fb0ff58 100644
--- a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc
@@ -41,6 +41,8 @@ const BaseRef PoolingActivationFusion::DefinePattern() const {
 
 const AnfNodePtr PoolingActivationFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                   const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "pooling activation pass process:" << schema::EnumNamesPrimitiveType()[primitive_type];
   CheckIfFuncGraphIsNull(func_graph);
 
diff --git a/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc b/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc
index b5c262f8ec..c6749c7968 100644
--- a/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc
@@ -33,6 +33,8 @@ const BaseRef QuantDtypeCastFusion::DefinePattern() const {
 
 const AnfNodePtr QuantDtypeCastFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node,
                                                const EquivPtr &) const {
+  MS_ASSERT(func_graph != nullptr);
+  MS_ASSERT(node != nullptr);
   MS_LOG(DEBUG) << "quant dtype cast fusion pass process";
   if (CheckIfFuncGraphIsNull(func_graph) != lite::RET_OK || CheckIfAnfNodeIsNull(node) != lite::RET_OK) {
     return nullptr;
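-- 

Note on the two guard styles this patch mixes: the runtime checks (record lite::RET_NULL_PTR through the return-code singleton, then bail out) survive release builds and handle a recoverable caller error, while MS_ASSERT compiles away outside debug builds and only documents an internal invariant. Below is a minimal, self-contained C++ sketch of both styles; ReturnCodeRegistry, DemoGraph, RunPass, and InitTransParam are illustrative stand-ins invented for this note, not MindSpore APIs, and the MS_ASSERT macro here merely mimics the debug-only behaviour of the real one.

#include <cassert>
#include <cstdio>
#include <memory>

// Debug-only check: fires under assert() in debug builds, compiles away
// when NDEBUG is defined (mimics the behaviour of MS_ASSERT).
#ifndef NDEBUG
#define MS_ASSERT(expr) assert(expr)
#else
#define MS_ASSERT(expr) ((void)0)
#endif

// Stand-in for the lite::ReturnCode singleton: remembers the first error.
enum Status { RET_OK = 0, RET_NULL_PTR = -1 };

class ReturnCodeRegistry {
 public:
  static ReturnCodeRegistry *GetSingleReturnCode() {
    static ReturnCodeRegistry instance;
    return &instance;
  }
  void UpdateReturnCode(Status code) {
    if (code_ == RET_OK) {
      code_ = code;  // keep the first failure seen
    }
  }
  Status code() const { return code_; }

 private:
  Status code_ = RET_OK;
};

struct DemoGraph {};  // stand-in for FuncGraph

// Style 1: release-safe runtime guard, as in HandleSexpVector and
// GetRealNodeUsedList -- record RET_NULL_PTR and return a failure value
// instead of dereferencing a null pointer.
bool RunPass(const std::shared_ptr<DemoGraph> &graph) {
  if (graph == nullptr) {
    ReturnCodeRegistry::GetSingleReturnCode()->UpdateReturnCode(RET_NULL_PTR);
    return false;
  }
  // ... real pass work would happen here ...
  return true;
}

// Style 2: debug-only contract check, as in the MS_ASSERT guards added to
// the fusion passes -- a null pointer here means an internal bug, not a
// recoverable caller error.
void InitTransParam(float *trans_scale, float *trans_bias) {
  MS_ASSERT(trans_scale != nullptr);
  MS_ASSERT(trans_bias != nullptr);
  *trans_scale = 1.0f;  // identity transform as a placeholder
  *trans_bias = 0.0f;
}

int main() {
  if (!RunPass(nullptr)) {
    std::printf("pass rejected null graph, code=%d\n",
                static_cast<int>(ReturnCodeRegistry::GetSingleReturnCode()->code()));
  }
  float scale = 0.0f, bias = 0.0f;
  InitTransParam(&scale, &bias);
  std::printf("trans_scale=%.1f trans_bias=%.1f\n", scale, bias);
  return 0;
}

The split matters because MS_ASSERT gives zero protection in a release build: that is why public entry points such as PassManager::Run and the gllo_utils helpers get real runtime checks above, while the fusion-pass internals settle for assertions.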