From dbd62c0ef7690628c0419d715577316408d70aae Mon Sep 17 00:00:00 2001 From: yvette Date: Thu, 19 Nov 2020 16:37:15 +0800 Subject: [PATCH] modify static check --- .../lite/tools/anf_exporter/anf_exporter.cc | 22 +-- .../lite/tools/anf_exporter/anf_exporter.h | 23 +-- .../lite/tools/anf_importer/anf_importer.cc | 2 - .../lite/tools/anf_importer/anf_importer.h | 6 +- .../anf_importer/import_from_meta_graphT.cc | 22 +-- .../anf_importer/import_from_meta_graphT.h | 10 +- .../anf_importer/import_from_protobuf.cc | 5 +- .../tools/anf_importer/import_from_protobuf.h | 6 +- mindspore/lite/tools/common/flag_parser.cc | 21 ++- mindspore/lite/tools/common/flag_parser.h | 14 +- mindspore/lite/tools/common/graph_util.cc | 80 ++++++---- mindspore/lite/tools/common/graph_util.h | 17 +- mindspore/lite/tools/common/node_util.cc | 8 +- mindspore/lite/tools/common/node_util.h | 15 +- mindspore/lite/tools/common/option.h | 8 +- mindspore/lite/tools/common/protobuf_utils.h | 6 +- mindspore/lite/tools/common/storage.cc | 4 +- mindspore/lite/tools/common/storage.h | 10 +- mindspore/lite/tools/common/tensor_util.cc | 1 - mindspore/lite/tools/common/tensor_util.h | 16 +- .../parser/caffe/caffe_argmax_parser.cc | 5 +- .../parser/caffe/caffe_argmax_parser.h | 3 +- .../parser/caffe/caffe_batchnorm_parser.cc | 37 ++--- .../parser/caffe/caffe_batchnorm_parser.h | 1 + .../parser/caffe/caffe_concat_parser.cc | 14 +- .../parser/caffe/caffe_concat_parser.h | 2 +- .../parser/caffe/caffe_conv_base_parser.cc | 66 ++++---- .../parser/caffe/caffe_conv_base_parser.h | 18 +-- .../parser/caffe/caffe_convolution_parser.cc | 23 +-- .../parser/caffe/caffe_convolution_parser.h | 4 +- .../parser/caffe/caffe_crop_parser.cc | 13 +- .../parser/caffe/caffe_crop_parser.h | 2 +- .../caffe/caffe_deconvolution_parser.cc | 23 +-- .../parser/caffe/caffe_deconvolution_parser.h | 4 +- .../parser/caffe/caffe_eltwise_parser.cc | 11 +- .../parser/caffe/caffe_eltwise_parser.h | 2 +- .../parser/caffe/caffe_elu_parser.cc | 2 +- .../converter/parser/caffe/caffe_elu_parser.h | 2 +- .../parser/caffe/caffe_exp_parser.cc | 2 +- .../converter/parser/caffe/caffe_exp_parser.h | 2 +- .../parser/caffe/caffe_flatten_parser.h | 2 +- .../parser/caffe/caffe_innerproduct_parser.cc | 8 +- .../parser/caffe/caffe_innerproduct_parser.h | 2 +- .../converter/parser/caffe/caffe_inspector.cc | 6 +- .../parser/caffe/caffe_interp_parser.cc | 2 +- .../parser/caffe/caffe_interp_parser.h | 2 +- .../parser/caffe/caffe_node_parser.cc | 16 +- .../caffe/caffe_node_parser_registry.cc | 2 +- .../parser/caffe/caffe_permute_parser.cc | 2 +- .../parser/caffe/caffe_permute_parser.h | 2 +- .../parser/caffe/caffe_pooling_parser.cc | 14 +- .../parser/caffe/caffe_pooling_parser.h | 10 +- .../parser/caffe/caffe_power_parser.cc | 18 +-- .../parser/caffe/caffe_power_parser.h | 2 +- .../parser/caffe/caffe_prelu_parser.cc | 7 +- .../parser/caffe/caffe_prelu_parser.h | 2 +- .../parser/caffe/caffe_reduce_parser.cc | 3 +- .../parser/caffe/caffe_reduce_parser.h | 2 +- .../parser/caffe/caffe_relu6_parser.cc | 2 - .../parser/caffe/caffe_relu6_parser.h | 2 +- .../parser/caffe/caffe_reshape_parser.cc | 2 +- .../parser/caffe/caffe_reshape_parser.h | 2 +- .../parser/caffe/caffe_scale_parser.cc | 17 +- .../parser/caffe/caffe_scale_parser.h | 4 +- .../parser/caffe/caffe_sigmoid_parser.h | 2 +- .../parser/caffe/caffe_slice_parser.cc | 3 +- .../parser/caffe/caffe_slice_parser.h | 2 +- .../parser/caffe/caffe_softmax_parser.cc | 6 +- .../parser/caffe/caffe_softmax_parser.h | 2 +- 
.../parser/caffe/caffe_tanh_parser.h | 2 +- .../parser/caffe/caffe_tile_parser.cc | 2 +- .../parser/caffe/caffe_tile_parser.h | 2 +- .../onnx/onnx_arithmetic_operation_parser.cc | 2 +- .../onnx/onnx_arithmetic_operation_parser.h | 7 - .../parser/onnx/onnx_biasadd_parser.cc | 1 - .../onnx/onnx_constant_of_shape_parser.cc | 2 +- .../converter/parser/onnx/onnx_conv_parser.cc | 9 +- .../converter/parser/onnx/onnx_converter.h | 2 +- .../parser/onnx/onnx_deconv_parser.cc | 8 +- .../parser/onnx/onnx_deconv_parser.h | 2 +- .../parser/onnx/onnx_expand_parser.cc | 3 +- .../parser/onnx/onnx_lp_norm_parser.h | 2 +- .../converter/parser/onnx/onnx_lstm_parser.cc | 2 +- .../converter/parser/onnx/onnx_node_parser.h | 9 +- .../parser/onnx/onnx_node_parser_registry.cc | 7 - .../converter/parser/onnx/onnx_pool_parser.cc | 1 - .../converter/parser/onnx/onnx_relu_parser.cc | 4 +- .../converter/parser/onnx/onnx_relu_parser.h | 6 - .../converter/parser/onnx/onnx_topk_parser.cc | 1 - .../parser/onnx/onnx_transpose_parser.cc | 8 +- .../parser/onnx/onnx_upsample_parser.cc | 1 - .../parser/tflite/tflite_activation_parser.cc | 21 ++- .../parser/tflite/tflite_activation_parser.h | 35 ----- .../parser/tflite/tflite_addn_parser.cc | 9 +- .../parser/tflite/tflite_argmax_parser.cc | 12 +- .../parser/tflite/tflite_argmin_parser.cc | 12 +- .../parser/tflite/tflite_arithmetic_parser.cc | 85 ++++++---- .../parser/tflite/tflite_arithmetic_parser.h | 145 ------------------ .../tflite/tflite_batch_to_space_parser.cc | 8 +- .../tflite/tflite_batch_to_space_parser.h | 5 - .../tflite/tflite_broadcast_to_parser.cc | 4 +- .../parser/tflite/tflite_cast_parser.cc | 4 +- .../parser/tflite/tflite_concat_parser.cc | 8 +- .../parser/tflite/tflite_conv_parser.cc | 4 +- .../parser/tflite/tflite_custom_parser.cc | 23 ++- .../parser/tflite/tflite_custom_parser.h | 46 +++--- .../parser/tflite/tflite_deconv_parser.cc | 4 +- .../tflite/tflite_depth_to_space_parser.cc | 5 +- .../tflite/tflite_depthwise_conv_parser.cc | 4 +- .../parser/tflite/tflite_dequantize_parser.cc | 4 +- .../tflite/tflite_expand_dims_parser.cc | 5 +- .../parser/tflite/tflite_fill_parser.cc | 6 +- .../tflite/tflite_fullyconnected_parser.cc | 6 +- .../tflite/tflite_fullyconnected_parser.h | 5 - .../parser/tflite/tflite_gather_nd_parser.cc | 8 +- .../parser/tflite/tflite_gather_parser.cc | 8 +- .../tflite/tflite_hashtable_lookup_parser.cc | 12 +- .../parser/tflite/tflite_l2norm_parser.cc | 5 +- .../parser/tflite/tflite_logical_parser.cc | 17 +- .../parser/tflite/tflite_logical_parser.h | 21 +-- .../parser/tflite/tflite_lrn_parser.cc | 4 +- .../tflite/tflite_lsh_projection_parser.cc | 8 +- .../parser/tflite/tflite_model_parser.cc | 107 ++++++++++--- .../parser/tflite/tflite_model_parser.h | 8 +- .../parser/tflite/tflite_node_parser.h | 32 ++-- .../tflite/tflite_node_parser_registry.cc | 2 +- .../parser/tflite/tflite_one_hot_parser.cc | 21 +-- .../parser/tflite/tflite_pad_parser.cc | 6 +- .../parser/tflite/tflite_pooling_parser.cc | 12 +- .../parser/tflite/tflite_pooling_parser.h | 16 +- .../parser/tflite/tflite_prelu_parser.cc | 4 +- .../parser/tflite/tflite_quantize_parser.cc | 4 +- .../parser/tflite/tflite_range_parser.cc | 4 +- .../parser/tflite/tflite_rank_parser.cc | 4 +- .../parser/tflite/tflite_reduce_parser.cc | 23 ++- .../parser/tflite/tflite_reduce_parser.h | 30 ---- .../parser/tflite/tflite_reshape_parser.cc | 8 +- .../parser/tflite/tflite_reshape_parser.h | 2 +- .../parser/tflite/tflite_resize_parser.cc | 9 +- .../parser/tflite/tflite_resize_parser.h | 10 -- 
.../parser/tflite/tflite_reverse_parser.cc | 4 +- .../tflite/tflite_reverse_sequence_parser.cc | 4 +- .../parser/tflite/tflite_scatter_nd_parser.cc | 9 +- .../parser/tflite/tflite_shape_parser.cc | 6 +- .../parser/tflite/tflite_skip_gram_parser.cc | 6 +- .../parser/tflite/tflite_slice_parser.cc | 4 +- .../parser/tflite/tflite_softmax_parser.cc | 3 + .../parser/tflite/tflite_softmax_parser.h | 6 +- .../tflite/tflite_space_to_batch_nd_parser.cc | 4 +- .../tflite/tflite_space_to_depth_parser.cc | 4 +- .../tflite/tflite_sparse_to_dense_parser.cc | 5 +- .../parser/tflite/tflite_split_parser.cc | 14 +- .../parser/tflite/tflite_split_v_parser.cc | 17 +- .../parser/tflite/tflite_squeeze_parser.cc | 6 +- .../parser/tflite/tflite_stack_parser.cc | 8 +- .../tflite/tflite_strided_slice_parser.cc | 5 +- .../parser/tflite/tflite_tile_parser.cc | 4 +- .../parser/tflite/tflite_topk_v2_parser.cc | 7 +- .../parser/tflite/tflite_transpose_parser.cc | 4 +- .../parser/tflite/tflite_unique_parser.cc | 8 +- .../parser/tflite/tflite_unstack_parser.cc | 8 +- .../converter/parser/tflite/tflite_util.cc | 7 +- .../parser/tflite/tflite_where_parser.cc | 8 +- .../parser/tflite/tflite_while_parser.cc | 12 +- .../parser/tflite/tflite_while_parser.h | 2 +- .../parser/tflite/tflite_zeros_like_parser.cc | 4 +- mindspore/lite/tools/schema_gen/schema_gen.h | 2 +- .../lite/tools/schema_gen/schema_type_def.h | 1 + .../tools/schema_gen/schema_type_register.h | 4 +- 169 files changed, 871 insertions(+), 911 deletions(-) diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.cc b/mindspore/lite/tools/anf_exporter/anf_exporter.cc index 7ce2a813b1..c56456c820 100644 --- a/mindspore/lite/tools/anf_exporter/anf_exporter.cc +++ b/mindspore/lite/tools/anf_exporter/anf_exporter.cc @@ -89,7 +89,7 @@ void AnfExporter::RemoveIfDepend(const CNodePtr &cnode) { } int AnfExporter::ConvertQuantParam(const std::unique_ptr &meta_graph, - const std::shared_ptr primitive, + const std::shared_ptr &primitive, const std::unique_ptr &dst_node) { MS_ASSERT(meta_graph != nullptr); MS_ASSERT(primitive != nullptr); @@ -173,7 +173,7 @@ void AnfExporter::SetGraphInputIndex(const std::unique_ptr & int AnfExporter::SetGraphoutputIndex(const CNodePtr &cnode, const std::unique_ptr &meta_graphT, schema::CNodeT *return_node) { - MS_ASSERT(nullptr != meta_graph); + MS_ASSERT(nullptr != meta_graphT); MS_ASSERT(nullptr != return_node); for (size_t i = 1; i < cnode->inputs().size(); i++) { auto input_node = cnode->input(i); @@ -191,8 +191,8 @@ int AnfExporter::SetGraphoutputIndex(const CNodePtr &cnode, const std::unique_pt return RET_ERROR; } } - for (size_t i = 0; i < return_node->inputIndex.size(); ++i) { - meta_graphT->outputIndex.push_back(return_node->inputIndex[i]); + for (unsigned int &i : return_node->inputIndex) { + meta_graphT->outputIndex.push_back(i); } return RET_OK; } @@ -272,7 +272,7 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph, bool kee return meta_graphT.release(); } -int AnfExporter::ConvertInputCNode(const std::shared_ptr input_anode, schema::CNodeT *output_cnode) { +int AnfExporter::ConvertInputCNode(const std::shared_ptr &input_anode, schema::CNodeT *output_cnode) { std::string input_name = input_anode->fullname_with_scope(); auto input_cnode = utils::cast(input_anode); @@ -336,7 +336,7 @@ int AnfExporter::ConvertInputCNode(const std::shared_ptr input_anode, s return RET_OK; } -int AnfExporter::ConvertInputParameter(const std::shared_ptr input_anode, +int AnfExporter::ConvertInputParameter(const std::shared_ptr 
&input_anode, const std::unique_ptr &meta_graphT, schema::CNodeT *output_cnode) { auto paramNode = input_anode->cast(); @@ -382,7 +382,7 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr input_anod return RET_OK; } -int AnfExporter::ConvertInputValueNode(std::shared_ptr input_anode, +int AnfExporter::ConvertInputValueNode(const std::shared_ptr &input_anode, const std::unique_ptr &meta_graphT, schema::CNodeT *output_cnode) { auto valueNode = input_anode->cast(); @@ -478,7 +478,7 @@ int AnfExporter::ConvertInputValueNode(std::shared_ptr input_anode, int AnfExporter::SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr &meta_graphT, schema::CNodeT *fb_node) { - MS_ASSERT(nullptr != meta_graph); + MS_ASSERT(nullptr != meta_graphT); MS_ASSERT(nullptr != fb_node); if (cnode->inputs().size() <= 1) { return RET_OK; @@ -518,14 +518,14 @@ int AnfExporter::SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr &meta_graphT, schema::CNodeT *fb_node) { - MS_ASSERT(nullptr != graph); + MS_ASSERT(nullptr != meta_graphT); MS_ASSERT(nullptr != fb_node); std::string cnode_name = fb_node->name; if (utils::isa(cnode->abstract())) { auto tuple = std::reinterpret_pointer_cast(cnode->abstract()); for (size_t i = 0; i < tuple->size(); i++) { - auto msTensor = new schema::TensorT(); + auto msTensor = new (std::nothrow) schema::TensorT(); msTensor->nodeType = schema::NodeType_CNode; fb_node->outputIndex.emplace_back(meta_graphT->allTensors.size()); #ifdef SUPPORT_TRAIN @@ -552,7 +552,7 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptrnodeType = schema::NodeType_CNode; ms_tensor->dataType = TypeId::kNumberTypeFloat32; fb_node->outputIndex.emplace_back(meta_graphT->allTensors.size()); diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.h b/mindspore/lite/tools/anf_exporter/anf_exporter.h index f8d5011f48..b2ce32a970 100644 --- a/mindspore/lite/tools/anf_exporter/anf_exporter.h +++ b/mindspore/lite/tools/anf_exporter/anf_exporter.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_LITE_SRC_ANF_EXPORTER_ANF_EXPORTER_H_ -#define MINDSPORE_LITE_SRC_ANF_EXPORTER_ANF_EXPORTER_H_ +#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_EXPORTER_ANF_EXPORTER_H_ +#define MINDSPORE_LITE_TOOLS_COMMON_ANF_EXPORTER_ANF_EXPORTER_H_ #include #include @@ -36,21 +36,22 @@ class AnfExporter { schema::CNodeT *fb_node); int SetOpInputNode(const CNodePtr &cnode, const std::unique_ptr &meta_graphT, schema::CNodeT *fb_node); - void RemoveIfMakeTuple(const CNodePtr &cnode); - void RemoveIfDepend(const CNodePtr &cnode); + static void RemoveIfMakeTuple(const CNodePtr &cnode); + static void RemoveIfDepend(const CNodePtr &cnode); protected: - int ConvertInputCNode(const std::shared_ptr input_anode, schema::CNodeT *output_cnode); - int ConvertInputParameter(const std::shared_ptr input_anode, + int ConvertInputCNode(const std::shared_ptr &input_anode, schema::CNodeT *output_cnode); + int ConvertInputParameter(const std::shared_ptr &input_anode, const std::unique_ptr &meta_graphT, schema::CNodeT *output_cnode); - int ConvertInputValueNode(std::shared_ptr input_anode, + int ConvertInputValueNode(const std::shared_ptr &input_anode, const std::unique_ptr &meta_graphT, schema::CNodeT *output_cnode); void SetGraphInputIndex(const std::unique_ptr &meta_graphT); int SetGraphoutputIndex(const CNodePtr &cnode, const std::unique_ptr &meta_graphT, schema::CNodeT *return_node); - bool IsPrimitiveCNode(const AnfNodePtr &node, schema::PrimitiveType type); - int ConvertQuantParam(const std::unique_ptr &meta_graph, - const std::shared_ptr primitive, const std::unique_ptr &dst_node); + static bool IsPrimitiveCNode(const AnfNodePtr &node, schema::PrimitiveType type); + static int ConvertQuantParam(const std::unique_ptr &meta_graph, + const std::shared_ptr &primitive, + const std::unique_ptr &dst_node); private: std::map node_id_map_; @@ -62,4 +63,4 @@ class AnfExporter { // and clear. schema::MetaGraphT *Export(const FuncGraphPtr &func_graph, bool keep_graph = false, bool copy_primitive = false); } // namespace mindspore::lite -#endif // MINDSPORE_LITE_SRC_ANF_EXPORTER_ANF_EXPORTER_H_ +#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_EXPORTER_ANF_EXPORTER_H_ diff --git a/mindspore/lite/tools/anf_importer/anf_importer.cc b/mindspore/lite/tools/anf_importer/anf_importer.cc index b5a55c266c..789a007b55 100644 --- a/mindspore/lite/tools/anf_importer/anf_importer.cc +++ b/mindspore/lite/tools/anf_importer/anf_importer.cc @@ -15,8 +15,6 @@ */ #include -#include -#include #include "tools/anf_importer/anf_importer.h" #include "schema/model_generated.h" #include "ir/dtype.h" diff --git a/mindspore/lite/tools/anf_importer/anf_importer.h b/mindspore/lite/tools/anf_importer/anf_importer.h index 8ac3406db3..de15ead15f 100644 --- a/mindspore/lite/tools/anf_importer/anf_importer.h +++ b/mindspore/lite/tools/anf_importer/anf_importer.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_LITE_SRC_ANF_IMPORTER_ANF_IMPORTER_H_ -#define MINDSPORE_LITE_SRC_ANF_IMPORTER_ANF_IMPORTER_H_ +#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_ANF_IMPORTER_H_ +#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_ANF_IMPORTER_H_ #include #include "ir/func_graph.h" @@ -51,4 +51,4 @@ class AnfImporter { }; } // namespace mindspore::lite -#endif // MINDSPORE_LITE_SRC_ANF_IMPORTER_ANF_IMPORTER_H_ +#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_ANF_IMPORTER_H_ diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc index fb558104cc..3f485f15a6 100644 --- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc +++ b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc @@ -22,7 +22,6 @@ #include "src/param_value_lite.h" #include "src/common/log_adapter.h" #include "include/errorcode.h" -#include "tools/common/tensor_util.h" namespace mindspore::lite { int AnfImporterFromMetaGraphT::ConverterConstTensor() { @@ -31,11 +30,9 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() { for (size_t i = 0; i < meta_graph_->allTensors.size(); i++) { auto &tensor = meta_graph_->allTensors.at(i); MS_ASSERT(tensor != nullptr); - // converter weight and graph input into parameter node if (tensor->nodeType != schema::NodeType::NodeType_ValueNode) { continue; } - MS_ASSERT(tensor->dims() != nullptr); auto parameter = func_graph_->add_parameter(); std::vector shape(tensor->dims.size()); std::copy(tensor->dims.begin(), tensor->dims.end(), shape.begin()); @@ -45,11 +42,12 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() { (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), [](const int32_t &value) { return static_cast(value); }); auto abstract_tensor = std::make_shared(type_ptr, shape_vector); + MS_ASSERT(nullptr != abstract_tensor); parameter->set_abstract(abstract_tensor); parameter->set_name("const_" + std::to_string(i) + "_parameter"); ParamValueLitePtr param_value = std::make_shared(); - MS_ASSERT(param_value != nullptr); + MS_ASSERT(nullptr != param_value); param_value->set_tensor_shape(shape); param_value->set_tensor_type(type_id); param_value->set_format(tensor->format); @@ -123,7 +121,9 @@ abstract::AbstractTensorPtr AnfImporterFromMetaGraphT::ConvertTensorToAbstractTe std::vector shape_vector; (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), [](const int32_t &value) { return static_cast(value); }); - return std::make_shared(type_ptr, shape_vector); + auto ptr = std::make_shared(type_ptr, shape_vector); + MS_ASSERT(nullptr != ptr); + return ptr; } int AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr &src_cnode, @@ -175,15 +175,16 @@ int AnfImporterFromMetaGraphT::ConverterCNode() { return RET_NULL_PTR; } std::vector op_inputs = {anf_primitive}; - for (unsigned int j : cNode->inputIndex) { + for (int j : cNode->inputIndex) { auto node = GetNode(j); if (nullptr == node) { MS_LOG(ERROR) << "Can't find input node."; - return RET_ERROR; + return RET_NULL_PTR; } op_inputs.push_back(node); } auto new_cnode = func_graph_->NewCNode(op_inputs); + MS_ASSERT(nullptr != new_cnode); new_cnode->set_fullname_with_scope(cNode->name); auto status = ConvertAbstract(cNode, new_cnode); if (status != RET_OK) { @@ -195,10 +196,8 @@ int AnfImporterFromMetaGraphT::ConverterCNode() { } int AnfImporterFromMetaGraphT::AddReturnCNode() { - if (meta_graph_ == nullptr || func_graph_ == nullptr) { - MS_LOG(ERROR) << "meta_graph or 
func_graph is nullptr"; - return RET_NULL_PTR; - } + MS_ASSERT(nullptr != meta_graph_); + MS_ASSERT(nullptr != func_graph_); if (meta_graph_->outputIndex.size() > 1) { std::vector make_tuple_inputs; auto make_tuple_prim_ptr = GetMakeTuplePrim(); @@ -229,6 +228,7 @@ int AnfImporterFromMetaGraphT::AddReturnCNode() { op_inputs.emplace_back(value_node); op_inputs.emplace_back(make_tuple_cnode); auto cnode = func_graph_->NewCNode(op_inputs); + MS_ASSERT(nullptr != cnode); cnode->set_fullname_with_scope("return"); func_graph_->set_return(cnode); } else { diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h index 960c14a09f..0e16c3742e 100644 --- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h +++ b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ -#define MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ +#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ +#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ #include #include @@ -40,7 +40,9 @@ class AnfImporterFromMetaGraphT : public AnfImporter { int ConverterCNode() override; ValueNodePtr ConvertPrimitive(const std::unique_ptr &cNode); - abstract::AbstractTensorPtr ConvertTensorToAbstractTensor(const std::unique_ptr &tensor); + + static abstract::AbstractTensorPtr ConvertTensorToAbstractTensor(const std::unique_ptr &tensor); + int ConvertAbstract(const std::unique_ptr &src_cnode, const CNodePtr &dst_cnode); int AddReturnCNode() override; @@ -51,4 +53,4 @@ class AnfImporterFromMetaGraphT : public AnfImporter { }; } // namespace mindspore::lite -#endif // MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ +#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ diff --git a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc index 1104c66913..d1b696c4bf 100644 --- a/mindspore/lite/tools/anf_importer/import_from_protobuf.cc +++ b/mindspore/lite/tools/anf_importer/import_from_protobuf.cc @@ -239,7 +239,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node node->set_abstract(abstract_tensor); if (default_para_map_.find(value_proto.name()) != default_para_map_.end()) { - auto *tensor_info = new Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape); + auto *tensor_info = new (std::nothrow) Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape); if (tensor_info == nullptr) { return RET_MEMORY_FAILED; } @@ -345,7 +345,6 @@ ValuePtr AnfImporterFromProtobuf::ObtainCNodeAttrInScalarForm(const onnx::Tensor MS_LOG(ERROR) << "Obtain attr in scalar-form has not support input type: " << attr_tensor_type; return {}; } - return {}; } bool AnfImporterFromProtobuf::ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name, @@ -871,7 +870,7 @@ int AnfImporterFromProtobuf::Import(const schema::QuantType &quantType) { } onnx::ModelProto *AnfImporterFromProtobuf::ReadOnnxFromBinary(const std::string &model_path) { - auto onnx_model = new onnx::ModelProto; + auto onnx_model = new (std::nothrow) onnx::ModelProto; if (RET_OK != ValidateFileStr(model_path, ".mindir")) { MS_LOG(ERROR) << "INPUT ILLEGAL: modelFile must be *.mindir"; 
ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_INPUT_PARAM_INVALID); diff --git a/mindspore/lite/tools/anf_importer/import_from_protobuf.h b/mindspore/lite/tools/anf_importer/import_from_protobuf.h index 3ca2a46cb5..11f831e66e 100644 --- a/mindspore/lite/tools/anf_importer/import_from_protobuf.h +++ b/mindspore/lite/tools/anf_importer/import_from_protobuf.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_ -#define MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_ +#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_ +#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_ #include #include @@ -81,4 +81,4 @@ class AnfImporterFromProtobuf : public AnfImporter { }; } // namespace mindspore::lite -#endif // MINDSPORE_LITE_SRC_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_ +#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_ diff --git a/mindspore/lite/tools/common/flag_parser.cc b/mindspore/lite/tools/common/flag_parser.cc index 053261106a..fe41d832b4 100644 --- a/mindspore/lite/tools/common/flag_parser.cc +++ b/mindspore/lite/tools/common/flag_parser.cc @@ -24,7 +24,6 @@ Option FlagParser::ParseFlags(int argc, const char *const *argv, bo bool supportDuplicate) { MS_ASSERT(argv != nullptr); const int FLAG_PREFIX_LEN = 2; - // Get binary name binName = GetFileName(argv[0]); std::multimap> keyValues; @@ -45,9 +44,7 @@ Option FlagParser::ParseFlags(int argc, const char *const *argv, bo Option value = Option(None()); size_t pos = flagItem.find_first_of('='); - if (pos == std::string::npos && flagItem.find("--no-") != std::string::npos) { - key = flagItem.substr(FLAG_PREFIX_LEN); - } else if (pos == std::string::npos) { + if (pos == std::string::npos) { key = flagItem.substr(FLAG_PREFIX_LEN); } else { key = flagItem.substr(FLAG_PREFIX_LEN, pos - FLAG_PREFIX_LEN); @@ -81,10 +78,10 @@ bool FlagParser::GetRealFlagName(std::string *flagName, const std::string &oriFl // Inner parse function Option FlagParser::InnerParseFlags(std::multimap> *keyValues) { MS_ASSERT(keyValues != nullptr); - for (auto it = keyValues->begin(); it != keyValues->end(); ++it) { + for (auto &keyValue : *keyValues) { std::string flagName; - bool opaque = GetRealFlagName(&flagName, (*it).first); - Option flagValue = (*it).second; + bool opaque = GetRealFlagName(&flagName, keyValue.first); + Option flagValue = keyValue.second; auto item = flags.find(flagName); if (item == flags.end()) { @@ -133,7 +130,7 @@ Option FlagParser::InnerParseFlags(std::multimap(None()); } -void Replaceall(std::string *str, const std::string &oldValue, const std::string &newValue) { +void ReplaceAll(std::string *str, const std::string &oldValue, const std::string &newValue) { if (str == nullptr) { MS_LOG(ERROR) << "Input str is nullptr"; return; @@ -153,9 +150,9 @@ std::string FlagParser::Usage(const Option &usgMsg) const { std::string usageString = usgMsg.IsSome() ? usgMsg.Get() + "\n" : ""; // usage of bin name usageString += usageMsg.IsNone() ? 
"\nusage: " + binName + " [options]\n" : usageMsg.Get() + "\n"; - // help line of help message, usageLine:message of parametors - std::string helpLine = ""; - std::string usageLine = ""; + // help line of help message, usageLine:message of parameters + std::string helpLine; + std::string usageLine; uint32_t i = 0; for (auto flag = flags.begin(); flag != flags.end(); flag++) { std::string flagName = flag->second.flagName; @@ -165,7 +162,7 @@ std::string FlagParser::Usage(const Option &usgMsg) const { if (++i <= flags.size()) { // add parameter help message of each line thisLine += " " + helpInfo; - Replaceall(&helpInfo, "\n\r", "\n"); + ReplaceAll(&helpInfo, "\n\r", "\n"); usageLine += thisLine + "\n"; } else { // breif help message diff --git a/mindspore/lite/tools/common/flag_parser.h b/mindspore/lite/tools/common/flag_parser.h index 69d27c5d54..dc4c913db5 100644 --- a/mindspore/lite/tools/common/flag_parser.h +++ b/mindspore/lite/tools/common/flag_parser.h @@ -14,21 +14,18 @@ * limitations under the License. */ -#ifndef PREDICT_COMMON_FLAG_PARSER_H_ -#define PREDICT_COMMON_FLAG_PARSER_H_ +#ifndef MINDSPORE_LITE_TOOLS_COMMON_FLAG_PARSER_H +#define MINDSPORE_LITE_TOOLS_COMMON_FLAG_PARSER_H #include #include #include #include - #include "src/common/utils.h" #include "tools/common/option.h" namespace mindspore { namespace lite { -struct FlagInfo; - struct Nothing {}; class FlagParser { @@ -44,6 +41,7 @@ class FlagParser { template void AddFlag(T1 *t1, const std::string &flagName, const std::string &helpInfo, const T2 *t2); + template void AddFlag(T1 *t1, const std::string &flagName, const std::string &helpInfo, const T2 &t2); @@ -94,7 +92,7 @@ class FlagParser { Option InnerParseFlags(std::multimap> *values); - bool GetRealFlagName(std::string *flagName, const std::string &oriFlagName); + static bool GetRealFlagName(std::string *flagName, const std::string &oriFlagName); std::map flags; }; @@ -181,7 +179,7 @@ void FlagParser::AddFlag(T1 *t1, const std::string &flagName, const std::string FlagInfo flagItem; - // flagItem is as a output parameter + // flagItem is as an output parameter ConstructFlag(t1, flagName, helpInfo, flagItem); flagItem.parse = [t1](FlagParser *base, const std::string &value) -> Option { if (base != nullptr) { @@ -301,4 +299,4 @@ void FlagParser::AddFlag(Option Flags::*t, const std::string &flagName, const } // namespace lite } // namespace mindspore -#endif // PREDICT_COMMON_FLAG_PARSER_H_ +#endif // MINDSPORE_LITE_TOOLS_COMMON_FLAG_PARSER_H diff --git a/mindspore/lite/tools/common/graph_util.cc b/mindspore/lite/tools/common/graph_util.cc index 033afe9124..7939cd8e03 100644 --- a/mindspore/lite/tools/common/graph_util.cc +++ b/mindspore/lite/tools/common/graph_util.cc @@ -15,8 +15,7 @@ */ #include "tools/common/graph_util.h" -#include -#include +#include #include #include #include "schema/inner/model_generated.h" @@ -29,7 +28,10 @@ namespace mindspore { namespace lite { OpDefCopyer GetSimpleOpCopyer() { return [](CNodeT *inCNode) -> std::unique_ptr { - std::unique_ptr newCNode(new CNodeT); + std::unique_ptr newCNode = std::make_unique(); + if (newCNode == nullptr) { + return nullptr; + } newCNode->name = inCNode->name; newCNode->quantType = inCNode->quantType; @@ -163,8 +165,6 @@ STATUS IsolateNode(schema::MetaGraphT *graphT, CNodeT *node) { } } - // whether need to remove weightInputTensores - // remove all node's outputTensors RemoveTensor(graphT, outputTensorIdxes); node->inputIndex.clear(); node->outputIndex.clear(); @@ -183,8 +183,11 @@ STATUS 
IsolateOneWayNode(schema::MetaGraphT *graphT, size_t nodeIdx, bool remove MS_LOG(ERROR) << "nodeIdx out of range: " << nodeIdx; return RET_PARAM_INVALID; } - CNodeT *node = graphT->nodes.at(nodeIdx).get(); + if (node == nullptr) { + MS_LOG(ERROR) << "node is null"; + return RET_NULL_PTR; + } auto inputTensorIdxes = node->inputIndex; auto outputTensorIdxes = node->outputIndex; auto preNodeIdxes = GetInputNodeIdx(*graphT, nodeIdx); @@ -244,6 +247,7 @@ STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, CNodeT *node, bool removeTe size_t nodeIdx = 0; for (size_t i = 0; i < graphT->nodes.size(); i++) { auto &inNode = graphT->nodes.at(i); + MS_ASSERT(inNode != nullptr); if (inNode->name == node->name) { isSubNode = true; nodeIdx = i; @@ -259,6 +263,7 @@ STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, CNodeT *node, bool removeTe } STATUS RemoveTensor(schema::MetaGraphT *graphT, std::vector toDeleteTensorIdxes, bool forceDelete) { + MS_ASSERT(graphT != nullptr); for (auto iter = toDeleteTensorIdxes.begin(); iter != toDeleteTensorIdxes.end();) { uint32_t deleteIdx = *iter; if (!forceDelete) { @@ -297,6 +302,7 @@ STATUS RemoveTensor(schema::MetaGraphT *graphT, std::vector toDeleteTe } STATUS UpdateNodeIndex(CNodeT *node, uint32_t deleteIdx) { + MS_ASSERT(node != nullptr); for (auto inIdxIt = node->inputIndex.begin(); inIdxIt != node->inputIndex.end();) { if (*inIdxIt == deleteIdx) { inIdxIt = node->inputIndex.erase(inIdxIt); @@ -330,6 +336,7 @@ STATUS AddTensor2Node(schema::MetaGraphT *graphT, uint32_t nodeIdx, std::unique_ graphT->allTensors.emplace_back(std::move(tensor)); uint32_t newTensorIdx = graphT->allTensors.size() - 1; auto node = graphT->nodes.at(nodeIdx).get(); + MS_ASSERT(node != nullptr); if (place == kBefore) { node->inputIndex.emplace_back(newTensorIdx); } else { @@ -340,11 +347,13 @@ STATUS AddTensor2Node(schema::MetaGraphT *graphT, uint32_t nodeIdx, std::unique_ STATUS ReplaceTensorOfNode(schema::MetaGraphT *graphT, uint32_t nodeIdx, uint32_t inTensorIdx, std::unique_ptr tensor) { + MS_ASSERT(graphT != nullptr); if (nodeIdx >= graphT->nodes.size()) { MS_LOG(ERROR) << "nodeIdx out of range: " << nodeIdx; return RET_PARAM_INVALID; } auto node = graphT->nodes.at(nodeIdx).get(); + MS_ASSERT(node != nullptr); if (inTensorIdx >= graphT->allTensors.size()) { MS_LOG(ERROR) << "inTensorIdx out of range: " << nodeIdx; return RET_PARAM_INVALID; @@ -358,7 +367,9 @@ STATUS ReplaceTensorOfNode(schema::MetaGraphT *graphT, uint32_t nodeIdx, uint32_ } NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPlace place, size_t inoutIndex, - std::unique_ptr toAddNode, STATUS *errorCode, OpDefCopyer opDefCopyer) { + std::unique_ptr toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer) { + MS_ASSERT(graphT != nullptr); + MS_ASSERT(errorCode != nullptr); if (existNodeIdx >= graphT->nodes.size()) { MS_LOG(ERROR) << "nodeIdx out of range: " << existNodeIdx; return graphT->nodes.end(); @@ -370,7 +381,9 @@ NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPla } NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPlace place, size_t inoutIndexIdx, - std::unique_ptr toAddNode, STATUS *errorCode, OpDefCopyer opDefCopyer) { + std::unique_ptr toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer) { + MS_ASSERT(graphT != nullptr); + MS_ASSERT(errorCode != nullptr); if (place == kBefore) { return InsertNodeBefore(graphT, existNodeIter, inoutIndexIdx, std::move(toAddNode), errorCode, opDefCopyer); } else if (place == 
kAfter) { @@ -382,7 +395,9 @@ NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPl } NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t inputIndexIdx, - std::unique_ptr toAddNodeIn, STATUS *errorCode, OpDefCopyer opDefCopyer) { + std::unique_ptr toAddNodeIn, STATUS *errorCode, const OpDefCopyer &opDefCopyer) { + MS_ASSERT(graphT != nullptr); + MS_ASSERT(errorCode != nullptr); auto &existNode = *existNodeIter; MS_ASSERT(existNode != nullptr); MS_ASSERT(existNode->inputIndex.size() > inputIndexIdx); @@ -390,7 +405,7 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si auto preTensorIdx = existNode->inputIndex.at(inputIndexIdx); MS_ASSERT(graphT->allTensors.size() > preTensorIdx); - auto preNodeIdxes = GetInputNodeIdx(*graphT, *(existNode.get()), inputIndexIdx); + auto preNodeIdxes = GetInputNodeIdx(*graphT, *(existNode), inputIndexIdx); if (preNodeIdxes.empty()) { auto &preTensor = graphT->allTensors.at(preTensorIdx); MS_ASSERT(preTensor != nullptr); @@ -402,9 +417,12 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si } preTensor->refCount = 0; preTensor->data.clear(); + MS_ASSERT(toAddNodeIn->primitive != nullptr); if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) { - preTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT; - toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT; + auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast(); + MS_ASSERT(prim != nullptr); + preTensor->dataType = prim->srcT; + toAddTensor->dataType = prim->dstT; } graphT->allTensors.emplace_back(std::move(toAddTensor)); size_t toAddTensorIdx = graphT->allTensors.size() - 1; @@ -438,9 +456,12 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si MS_LOG(ERROR) << "Copy TensorT failed"; return graphT->nodes.end(); } + MS_ASSERT(toAddNodeIn->primitive != nullptr); if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) { - preTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT; - toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT; + auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast(); + MS_ASSERT(prim != nullptr); + preTensor->dataType = prim->srcT; + toAddTensor->dataType = prim->dstT; } graphT->allTensors.emplace_back(std::move(toAddTensor)); size_t toAddTensorIdx = graphT->allTensors.size() - 1; @@ -473,7 +494,10 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si } NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t outputIndexIdx, - std::unique_ptr toAddNodeIn, STATUS *errorCode, OpDefCopyer opDefCopyer) { + std::unique_ptr toAddNodeIn, STATUS *errorCode, + const OpDefCopyer &opDefCopyer) { + MS_ASSERT(graphT != nullptr); + MS_ASSERT(errorCode != nullptr); auto &existNode = *existNodeIter; MS_ASSERT(existNode != nullptr); MS_ASSERT(existNode->outputIndex.size() > outputIndexIdx); @@ -481,7 +505,7 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz auto postTensorIdx = existNode->outputIndex.at(outputIndexIdx); MS_ASSERT(graphT->allTensors.size() > postTensorIdx); - auto postNodeIdxes = GetOutputNodeIdx(*graphT, *(existNode.get()), outputIndexIdx); + auto postNodeIdxes = GetOutputNodeIdx(*graphT, *(existNode), outputIndexIdx); if (postNodeIdxes.empty()) { auto &postTensor = 
graphT->allTensors.at(postTensorIdx); MS_ASSERT(postTensor != nullptr); @@ -491,9 +515,12 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz *errorCode = RET_NULL_PTR; return graphT->nodes.end(); } + MS_ASSERT(toAddNodeIn->primitive != nullptr); if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) { - postTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT; - toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT; + auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast(); + MS_ASSERT(prim != nullptr); + postTensor->dataType = prim->srcT; + toAddTensor->dataType = prim->dstT; } graphT->allTensors.emplace_back(std::move(toAddTensor)); size_t toAddTensorIdx = graphT->allTensors.size() - 1; @@ -554,9 +581,12 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz *errorCode = RET_NULL_PTR; return graphT->nodes.end(); } + MS_ASSERT(toAddNodeIn->primitive != nullptr); if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) { - postTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT; - toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT; + auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast(); + MS_ASSERT(prim != nullptr); + postTensor->dataType = prim->srcT; + toAddTensor->dataType = prim->dstT; } graphT->allTensors.emplace_back(std::move(toAddTensor)); size_t toAddTensorIdx = graphT->allTensors.size() - 1; @@ -589,13 +619,9 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz return existNodeIter; } -STATUS ValidateFileStr(const std::string &modelFile, std::string fileType) { - if (modelFile.size() > fileType.size()) { - if (modelFile.substr(modelFile.size() - fileType.size()) == fileType) { - return RET_OK; - } else { - return RET_ERROR; - } +STATUS ValidateFileStr(const std::string &modelFile, const std::string &fileType) { + if (modelFile.size() > fileType.size() && modelFile.substr(modelFile.size() - fileType.size()) == fileType) { + return RET_OK; } else { return RET_ERROR; } diff --git a/mindspore/lite/tools/common/graph_util.h b/mindspore/lite/tools/common/graph_util.h index ea0370a97c..5e5e3fe083 100644 --- a/mindspore/lite/tools/common/graph_util.h +++ b/mindspore/lite/tools/common/graph_util.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_PREDICT_GRAPH_UTIL_H -#define MINDSPORE_PREDICT_GRAPH_UTIL_H +#ifndef MINDSPORE_LITE_TOOLS_COMMON_GRAPH_UTIL_H +#define MINDSPORE_LITE_TOOLS_COMMON_GRAPH_UTIL_H #include #include @@ -23,7 +23,6 @@ #include #include #include - #include "include/errorcode.h" #include "schema/inner/model_generated.h" #include "src/common/graph_util.h" @@ -73,19 +72,19 @@ STATUS ReplaceTensorOfNode(schema::MetaGraphT *graphT, uint32_t nodeIdx, uint32_ NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPlace place, size_t inoutIndex, std::unique_ptr toAddNode, STATUS *errorCode, - OpDefCopyer opDefCopyer = GetSimpleOpCopyer()); + const OpDefCopyer &opDefCopyer = GetSimpleOpCopyer()); NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPlace place, size_t inoutIndexIdx, std::unique_ptr toAddNode, STATUS *errorCode, - OpDefCopyer opDefCopyer = GetSimpleOpCopyer()); + const OpDefCopyer &opDefCopyer = GetSimpleOpCopyer()); NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t inputIndexIdx, - std::unique_ptr toAddNode, STATUS *errorCode, OpDefCopyer opDefCopyer); + std::unique_ptr toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer); NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t outputIndexIdx, - std::unique_ptr toAddNode, STATUS *errorCode, OpDefCopyer opDefCopyer); + std::unique_ptr toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer); -STATUS ValidateFileStr(const std::string &modelFile, std::string fileType); +STATUS ValidateFileStr(const std::string &modelFile, const std::string &fileType); void TransformAttrByAxes(int *origin_attr, int *axes, int element_size); @@ -97,4 +96,4 @@ std::string GetModelName(const std::string &modelFile); } // namespace lite } // namespace mindspore -#endif // MINDSPORE_PREDICT_GRAPH_UTIL_H +#endif // MINDSPORE_LITE_TOOLS_COMMON_GRAPH_UTIL_H diff --git a/mindspore/lite/tools/common/node_util.cc b/mindspore/lite/tools/common/node_util.cc index 41516999a8..8c182cda35 100644 --- a/mindspore/lite/tools/common/node_util.cc +++ b/mindspore/lite/tools/common/node_util.cc @@ -160,6 +160,7 @@ std::vector GetInt8OpList() { return int8OpList; } STATUS NodeUtils::ConvertDims(mindspore::schema::Format src_format, const std::vector &src_dims, mindspore::schema::Format dst_format, std::vector *dst_dims) { + MS_ASSERT(nullptr != dst_dims); if ((src_dims.size() != DIM_DEFAULT_SIZE && src_dims.size() != 3) || src_format == dst_format) { MS_LOG(ERROR) << "Convert format , src size " << src_dims.size() << " <3 or src format is equal to dst format,not need convert"; @@ -189,7 +190,7 @@ STATUS NodeUtils::ConvertDims(mindspore::schema::Format src_format, const std::v return RET_ERROR; } - if (nchw_dim.size() == 0) { + if (nchw_dim.empty()) { MS_LOG(ERROR) << "Param nchw_dim is empty!"; return RET_ERROR; } @@ -215,6 +216,10 @@ STATUS NodeUtils::ConvertDims(mindspore::schema::Format src_format, const std::v STATUS GetFilterDim(const std::vector &oriDims, kTransFilterType type, int32_t *filterK, int32_t *filterC, int32_t *filterH, int32_t *filterW) { + if (filterK == nullptr || filterC == nullptr || filterH == nullptr || filterW == nullptr) { + MS_LOG(ERROR) << "null input"; + return RET_NULL_PTR; + } MS_ASSERT(oriDims.size() == 4); if (type == kKCHW2HWCK || type == kKCHW2HWKC || type == kKCHW2KHWC || type == kKCHW2CKHW) { *filterK = oriDims.at(KCHW_K); @@ -282,6 +287,7 @@ STATUS SetFilterDim(schema::TensorT *tensor, kTransFilterType type, int32_t 
filt STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat) { if (tensor == nullptr) { + MS_LOG(ERROR) << "tensor is null"; return RET_NULL_PTR; } std::vector oriDims = tensor->dims; diff --git a/mindspore/lite/tools/common/node_util.h b/mindspore/lite/tools/common/node_util.h index d78ea34104..a0b247cd50 100644 --- a/mindspore/lite/tools/common/node_util.h +++ b/mindspore/lite/tools/common/node_util.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef MINDSPORE_PREDICT_NODE_UTIL_H -#define MINDSPORE_PREDICT_NODE_UTIL_H +#ifndef MINDSPORE_LITE_TOOLS_COMMON_NODE_UTIL_H +#define MINDSPORE_LITE_TOOLS_COMMON_NODE_UTIL_H #include #include @@ -60,13 +60,6 @@ class NodeUtils { public: static STATUS ConvertDims(schema::Format src_format, const std::vector &src_dims, schema::Format dst_format, std::vector *dst_dims); - - static void SliceData(std::vector &input, int64_t chunk_size, std::vector &output, int64_t begin, - int64_t out_dim, int64_t stride); - - static STATUS SetOutputSliceData(void *data, int64_t data_size, int32_t data_type, std::vector &input_dims, - std::vector &begin, std::vector &output_dims, - schema::TensorT *output, std::vector &stride); }; enum kTransFilterType { @@ -133,7 +126,7 @@ static STATUS TransFilterData(schema::TensorT *tensor, kTransFilterType type, in if (type == kCHWK2HWCK) { p2Buff = buf.get() + ((h * filterW * filterC * filterK) + (w * filterC * filterK) + (c * filterK) + (k)); - } else if (type == kCHWK2KHWC) { + } else { p2Buff = buf.get() + ((k * filterH * filterW * filterC) + (h * filterW * filterC) + (w * filterC) + (c)); } @@ -334,4 +327,4 @@ static STATUS TransFilterFormat(schema::TensorT *tensor, kTransFilterType type) STATUS TransFilterFormat(schema::TensorT *tensor, schema::Format dstFormat); } // namespace lite } // namespace mindspore -#endif // MINDSPORE_PREDICT_NODE_UTIL_H +#endif // MINDSPORE_LITE_TOOLS_COMMON_NODE_UTIL_H diff --git a/mindspore/lite/tools/common/option.h b/mindspore/lite/tools/common/option.h index d39473e8eb..c57063f6bc 100644 --- a/mindspore/lite/tools/common/option.h +++ b/mindspore/lite/tools/common/option.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef PREDICT_COMMON_OPTION_H_ -#define PREDICT_COMMON_OPTION_H_ +#ifndef MINDSPORE_LITE_TOOLS_COMMON_OPTION_H +#define MINDSPORE_LITE_TOOLS_COMMON_OPTION_H #include #include @@ -56,7 +56,7 @@ class Option { } } - virtual ~Option() {} + virtual ~Option() = default; bool IsNone() const { return state == NONE; } @@ -116,4 +116,4 @@ class Option { } // namespace lite } // namespace mindspore -#endif // PREDICT_COMMON_OPTION_H_ +#endif // MINDSPORE_LITE_TOOLS_COMMON_OPTION_H diff --git a/mindspore/lite/tools/common/protobuf_utils.h b/mindspore/lite/tools/common/protobuf_utils.h index f49f93fdd9..e9419f9059 100644 --- a/mindspore/lite/tools/common/protobuf_utils.h +++ b/mindspore/lite/tools/common/protobuf_utils.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_ +#ifndef MINDSPORE_LITE_TOOLS_COMMON_PROTOBUF_UTILS_H +#define MINDSPORE_LITE_TOOLS_COMMON_PROTOBUF_UTILS_H #include #include @@ -35,4 +35,4 @@ STATUS ReadProtoFromBinaryFile(const char *file, google::protobuf::Message *mess } // namespace lite } // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_PARSE_UTILS_H_ +#endif // MINDSPORE_LITE_TOOLS_COMMON_PROTOBUF_UTILS_H diff --git a/mindspore/lite/tools/common/storage.cc b/mindspore/lite/tools/common/storage.cc index a94107b6d8..329596d500 100644 --- a/mindspore/lite/tools/common/storage.cc +++ b/mindspore/lite/tools/common/storage.cc @@ -50,7 +50,7 @@ int Storage::Save(const schema::MetaGraphT &graph, const std::string &outputPath } schema::MetaGraphT *Storage::Load(const std::string &inputPath) { - size_t size; + size_t size = 0; auto buf = ReadFile(inputPath.c_str(), &size); if (buf == nullptr) { MS_LOG(ERROR) << "the file buffer is nullptr"; @@ -58,7 +58,7 @@ schema::MetaGraphT *Storage::Load(const std::string &inputPath) { } flatbuffers::Verifier verify((const uint8_t *)buf, size); - if (false == schema::VerifyMetaGraphBuffer(verify)) { + if (!schema::VerifyMetaGraphBuffer(verify)) { MS_LOG(ERROR) << "the buffer is invalid and fail to create meta graph"; return nullptr; } diff --git a/mindspore/lite/tools/common/storage.h b/mindspore/lite/tools/common/storage.h index ef0f12bf5e..7f1906b0f6 100644 --- a/mindspore/lite/tools/common/storage.h +++ b/mindspore/lite/tools/common/storage.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef PREDICT_COMMON_STORAGE_H_ -#define PREDICT_COMMON_STORAGE_H_ +#ifndef MINDSPORE_LITE_TOOLS_COMMON_STORAGE_H +#define MINDSPORE_LITE_TOOLS_COMMON_STORAGE_H #include #include @@ -27,11 +27,11 @@ namespace mindspore { namespace lite { class Storage { public: - int Save(const schema::MetaGraphT &graph, const std::string &outputPath); + static int Save(const schema::MetaGraphT &graph, const std::string &outputPath); - schema::MetaGraphT *Load(const std::string &inputPath); + static schema::MetaGraphT *Load(const std::string &inputPath); }; } // namespace lite } // namespace mindspore -#endif // PREDICT_COMMON_STORAGE_H_ +#endif // MINDSPORE_LITE_TOOLS_COMMON_STORAGE_H diff --git a/mindspore/lite/tools/common/tensor_util.cc b/mindspore/lite/tools/common/tensor_util.cc index c7e85c9425..389ab8bb62 100644 --- a/mindspore/lite/tools/common/tensor_util.cc +++ b/mindspore/lite/tools/common/tensor_util.cc @@ -14,7 +14,6 @@ * limitations under the License. */ -#include #include "src/common/utils.h" #include "tools/common/tensor_util.h" #include "tools/common/graph_util.h" diff --git a/mindspore/lite/tools/common/tensor_util.h b/mindspore/lite/tools/common/tensor_util.h index c908d94c46..c53fc8d302 100644 --- a/mindspore/lite/tools/common/tensor_util.h +++ b/mindspore/lite/tools/common/tensor_util.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_PREDICT_TENSOR_UTIL_H -#define MINDSPORE_PREDICT_TENSOR_UTIL_H +#ifndef MINDSPORE_LITE_TOOLS_COMMON_TENSOR_UTIL_H +#define MINDSPORE_LITE_TOOLS_COMMON_TENSOR_UTIL_H #include #include @@ -58,13 +58,11 @@ std::unique_ptr CopyQuantParamT(const std::unique_ptr CopyQuantParamArrayT( const std::unique_ptr &srcQuantParamArray); -using MSGraphDefTPtr = std::shared_ptr; - enum Category { CONST = 0, GRAPH_INPUT = 1, OP_OUTPUT = 2, TF_CONST = 3 }; class TensorCache { public: - TensorCache() {} + TensorCache() = default; ~TensorCache() { tensors.clear(); } @@ -97,12 +95,12 @@ class TensorCache { return -1; } - void UpdateTensorIndex(const std::string &name, int index) { + void UpdateTensorIndex(const std::string &name, int idx) { auto iter = tensorIndex.find(name); if (iter != tensorIndex.end()) { - tensorIndex[name] = index; + tensorIndex[name] = idx; } else { - tensorIndex.insert(make_pair(name, index)); + tensorIndex.insert(make_pair(name, idx)); } } @@ -120,4 +118,4 @@ class TensorCache { } // namespace lite } // namespace mindspore -#endif // MINDSPORE_PREDICT_TENSOR_UTIL_H +#endif // MINDSPORE_LITE_TOOLS_COMMON_TENSOR_UTIL_H diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc index b8693b08f7..a2716ba53d 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc @@ -38,17 +38,16 @@ STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto, const caffe: return RET_NULL_PTR; } - // set default params attr->outMaxValue = false; attr->topK = 1; - const caffe::ArgMaxParameter argmaxParam = proto.argmax_param(); + const caffe::ArgMaxParameter &argmaxParam = proto.argmax_param(); if (argmaxParam.has_out_max_val()) { attr->outMaxValue = argmaxParam.out_max_val(); } if (argmaxParam.has_top_k()) { attr->topK = argmaxParam.top_k(); } - int32_t axisType; + int32_t axisType = 0; int32_t axis = 0; if (!argmaxParam.has_axis()) { axisType = 2; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h index 56305fb9a8..672699904a 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h @@ -26,7 +26,8 @@ namespace lite { class CaffeArgMaxParser : public CaffeNodeParser { public: CaffeArgMaxParser() : CaffeNodeParser("argmax") {} - ~CaffeArgMaxParser() = default; + ~CaffeArgMaxParser() override = default; + STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; }; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc index 24a24128ba..12b72ff730 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc @@ -19,12 +19,6 @@ #include #include "tools/common/tensor_util.h" -#define CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT 0.00001 -#define CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT 0.000000001 - -static const int CAFFE_BATCHNORMAL_BOTTOM_SIZE = 1; -static const int CAFFE_BATCHNORMAL_TOP_SIZE = 1; - namespace mindspore { namespace lite { using STATUS = int; @@ -32,6 +26,10 @@ using STATUS = int; STATUS CaffeBatchNormParser::Parse(const 
caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
                                    schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
   MS_LOG(DEBUG) << "parse CaffeBatchNormParser";
+  if (weightVec == nullptr) {
+    MS_LOG(ERROR) << "weightVec is null";
+    return RET_NULL_PTR;
+  }
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -48,43 +46,38 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
     return RET_NULL_PTR;
   }
 
-  const caffe::BatchNormParameter batchNormParam = proto.batch_norm_param();
-  // check bottom size
-  if (proto.bottom_size() != CAFFE_BATCHNORMAL_BOTTOM_SIZE) {
-    MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be "
-                  << CAFFE_BATCHNORMAL_BOTTOM_SIZE << "but is " << proto.bottom_size();
+  const caffe::BatchNormParameter &batchNormParam = proto.batch_norm_param();
+  if (proto.bottom_size() != 1) {
+    MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "bottom numbers is error, it must be 1, but is "
+                  << proto.bottom_size();
     return RET_ERROR;
   }
-
-  // check top size
-  if (proto.top_size() != CAFFE_BATCHNORMAL_TOP_SIZE) {
-    MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "top numbers is error, it must be "
-                  << CAFFE_BATCHNORMAL_TOP_SIZE << "but is " << proto.top_size();
+  if (proto.top_size() != 1) {
+    MS_LOG(ERROR) << "Layer " << proto.name().c_str() << "top numbers is error, it must be 1, but is "
+                  << proto.top_size();
     return RET_ERROR;
   }
 
   if (batchNormParam.has_eps()) {
-    if (fabs(CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT - batchNormParam.eps()) < CAFFE_BATCH_NORM_ESP_DEFAULT_DIFF_FLOAT) {
-      attr->epsilon = CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT;
+    if (std::fabs(1e-5 - batchNormParam.eps()) < 1e-9) {
+      attr->epsilon = 1e-5;
     } else {
       auto tmpAuto = batchNormParam.eps();
       attr->epsilon = tmpAuto;
     }
   } else {
-    attr->epsilon = CAFFE_BATCH_NORM_ESP_DEFAULT_FLOAT;
+    attr->epsilon = 1e-5;
   }
 
   const float blob2Data =
     (weight.blobs(2).double_data_size() > 0) ? weight.blobs(2).double_data(0) : weight.blobs(2).data(0);
   const float scaleFactor = blob2Data == 0 ? 0 : 1 / blob2Data;
 
-  // parse weight gamma
   auto gamma = ConvertWeight(weight.blobs(0));
   if (gamma == nullptr) {
     MS_LOG(ERROR) << "Convert blobs(0) for layer " << weight.name().c_str() << " failed";
     return RET_ERROR;
   }
-
   auto estimatedMean = reinterpret_cast<float *>(gamma->data.data());
   auto estimatedMeanShapeSize = GetShapeSize(*gamma);
   for (size_t i = 0; i < estimatedMeanShapeSize; i++) {
@@ -93,13 +86,11 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
   estimatedMean = nullptr;
   weightVec->push_back(gamma);
 
-  // parse weight beta
   auto beta = ConvertWeight(weight.blobs(1));
   if (beta == nullptr) {
     MS_LOG(ERROR) << "Convert blobs(1) for layer " << weight.name().c_str() << " failed";
     return RET_ERROR;
   }
-
   auto estimatedVariance = reinterpret_cast<float *>(beta->data.data());
   size_t estimatedVarianceShapeSize = GetShapeSize(*beta);
   for (size_t i = 0; i < estimatedVarianceShapeSize; i++) {
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h
index fc5e0bb529..9079682697 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h
@@ -26,6 +26,7 @@ namespace lite {
 class CaffeBatchNormParser : public CaffeNodeParser {
  public:
  CaffeBatchNormParser() : CaffeNodeParser("batchnorm") {}
+  ~CaffeBatchNormParser() override = default;
 
   STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
                std::vector<schema::TensorT *> *weightVec) override;
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc
index 682106ee47..2db9fa09cc 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc
@@ -17,8 +17,6 @@
 #include "tools/converter/parser/caffe/caffe_concat_parser.h"
 #include <memory>
 
-const int32_t CONCAT_DEFAULT_AXIS = 1;
-
 namespace mindspore {
 namespace lite {
 STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
@@ -40,7 +38,7 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, const caffe:
     return RET_NULL_PTR;
   }
 
-  const caffe::ConcatParameter concatParam = proto.concat_param();
+  const caffe::ConcatParameter &concatParam = proto.concat_param();
   if (concatParam.has_axis() && concatParam.has_concat_dim()) {
     MS_LOG(ERROR) << "Concat param in caffe have concat_dim and axis simultaneously, return fail";
     return RET_ERROR;
@@ -48,19 +46,19 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto, const caffe:
 
   if (concatParam.has_concat_dim()) {
     MS_LOG(DEBUG) << "Concat dim , set axis: " << concatParam.concat_dim();
-    int32_t concat_dim_value = (int32_t)concatParam.concat_dim();
+    auto concat_dim_value = (int32_t)concatParam.concat_dim();
     if (concat_dim_value < 0) {
       MS_LOG(ERROR) << "concat_dim value in model is smaller than 0:" << concat_dim_value;
       return RET_ERROR;
     }
     attr->axis = concat_dim_value;
   } else if (concatParam.has_axis()) {
-    MS_LOG(DEBUG) << "axis , set axis: " << concatParam.axis();
-    int32_t tmpInt = (int32_t)concatParam.axis();
+    MS_LOG(DEBUG) << "set axis: " << concatParam.axis();
+    auto tmpInt = (int32_t)concatParam.axis();
     attr->axis = tmpInt;
   } else {
-    MS_LOG(DEBUG) << "default , set axis: " << CONCAT_DEFAULT_AXIS;
-    attr->axis = CONCAT_DEFAULT_AXIS;
+    MS_LOG(DEBUG) << "by default, set axis = 1";
+    attr->axis = 1;
   }
 
   attr->n = proto.bottom_size();
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h
index 04be0c3175..c19f96f0d6 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h
@@ -26,7 +26,7 @@ namespace lite {
 class CaffeConcatParser : public CaffeNodeParser {
  public:
   CaffeConcatParser() : CaffeNodeParser("concat") {}
-  ~CaffeConcatParser() = default;
+  ~CaffeConcatParser() override = default;
 
   STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
                std::vector<schema::TensorT *> *weightVec) override;
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.cc
index dd767689d9..d68928547a 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.cc
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.cc
@@ -17,13 +17,6 @@
 #include "tools/converter/parser/caffe/caffe_conv_base_parser.h"
 #include <algorithm>
 
-const uint32_t PAD_DEFAULT_VALUE = 0;
-const uint32_t STRIDE_DEFAULT_VALUE = 1;
-const uint32_t DILATION_DEFAULT_VALUE = 1;
-const int32_t SPATIAL_DIM_DEFAULT_SIZE = 2;
-const uint32_t DEFAULT_CONV_GROUP = 1;
-static const int CAFFE_CONV_BIAS_DIM_NUM = 1;
-
 namespace mindspore {
 namespace lite {
 STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convParam, std::vector *pad) {
@@ -40,15 +33,15 @@ STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convPar
     }
 
     if (!convParam.has_pad_h()) {
-      (*pad)[0] = PAD_DEFAULT_VALUE;
-      (*pad)[1] = PAD_DEFAULT_VALUE;
+      (*pad)[0] = 0;
+      (*pad)[1] = 0;
       (*pad)[2] = convParam.pad_w();
       (*pad)[3] = convParam.pad_w();
     } else if (!convParam.has_pad_w()) {
       (*pad)[0] = convParam.pad_h();
       (*pad)[1] = convParam.pad_h();
-      (*pad)[2] = PAD_DEFAULT_VALUE;
-      (*pad)[3] = PAD_DEFAULT_VALUE;
+      (*pad)[2] = 0;
+      (*pad)[3] = 0;
     } else {
       (*pad)[0] = convParam.pad_h();
       (*pad)[1] = convParam.pad_h();
@@ -56,15 +49,14 @@ STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convPar
       (*pad)[3] = convParam.pad_w();
     }
   } else {
-    // default 2D
    const int num_pad_dims = convParam.pad_size();
-    int num_spatial_dims = std::max(num_pad_dims, SPATIAL_DIM_DEFAULT_SIZE);
+    int num_spatial_dims = std::max(num_pad_dims, 2);
 
     std::vector vec;
+    vec.reserve(num_spatial_dims);
     for (int i = 0; i < num_spatial_dims; ++i) {
-      vec.push_back((num_pad_dims == 0) ? PAD_DEFAULT_VALUE : convParam.pad((num_pad_dims == 1) ? 0 : i));
+      vec.push_back((num_pad_dims == 0) ? 0 : convParam.pad((num_pad_dims == 1) ? 0 : i));
     }
-    // default 2D
     (*pad)[0] = vec[0];
     (*pad)[1] = vec[0];
     (*pad)[2] = vec[1];
@@ -87,13 +79,13 @@ STATUS CaffeConvBaseParser::ParseStrides(const caffe::ConvolutionParameter &conv
     (*stride)[1] = convParam.stride_w();
   } else {
     const int num_stride_dims = convParam.stride_size();
-    int num_spatial_dims = std::max(num_stride_dims, SPATIAL_DIM_DEFAULT_SIZE);
+    int num_spatial_dims = std::max(num_stride_dims, 2);
 
     std::vector vec;
+    vec.reserve(num_spatial_dims);
     for (int i = 0; i < num_spatial_dims; ++i) {
-      vec.push_back((num_stride_dims == 0) ? STRIDE_DEFAULT_VALUE : convParam.stride((num_stride_dims == 1) ? 0 : i));
+      vec.push_back((num_stride_dims == 0) ? 1 : convParam.stride((num_stride_dims == 1) ? 0 : i));
     }
-    // default 2D
     (*stride)[0] = vec[0];
     (*stride)[1] = vec[1];
   }
@@ -103,17 +95,15 @@ STATUS CaffeConvBaseParser::ParseStrides(const caffe::ConvolutionParameter &conv
 
 STATUS CaffeConvBaseParser::ParseDilations(const caffe::ConvolutionParameter &convParam, std::vector *dilation) {
   const int num_dilation_dims = convParam.dilation_size();
-  int num_spatial_dims = std::max(num_dilation_dims, SPATIAL_DIM_DEFAULT_SIZE);
+  int num_spatial_dims = std::max(num_dilation_dims, 2);
 
   std::vector vec;
+  vec.reserve(num_spatial_dims);
   for (int i = 0; i < num_spatial_dims; ++i) {
-    vec.push_back((num_dilation_dims == 0) ? DILATION_DEFAULT_VALUE
-                                           : convParam.dilation((num_dilation_dims == 1) ? 0 : i));
+    vec.push_back((num_dilation_dims == 0) ? 1 : convParam.dilation((num_dilation_dims == 1) ? 0 : i));
   }
-  // default 2D
   (*dilation)[0] = vec[0];
   (*dilation)[1] = vec[1];
-
   return RET_OK;
 }
@@ -131,9 +121,11 @@ STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &conv
       return RET_ERROR;
     }
   } else if (convParam.kernel_size_size() != 0) {
-    int kernel_size = convParam.kernel_size_size();
-    int num_spatial_dims = std::max(kernel_size, SPATIAL_DIM_DEFAULT_SIZE);
+    const int kernel_size = convParam.kernel_size_size();
+    int num_spatial_dims = std::max(kernel_size, 2);
+
     std::vector vec;
+    vec.reserve(num_spatial_dims);
     for (int i = 0; i < num_spatial_dims; i++) {
       vec.push_back(convParam.kernel_size((kernel_size == 1) ? 0 : i));
     }
@@ -141,24 +133,25 @@ STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &conv
     (*kernel)[0] = vec[0];
     (*kernel)[1] = vec[1];
   } else {
+    MS_LOG(ERROR) << "conv does not have kernel info.";
     return RET_ERROR;
   }
   return RET_OK;
 }
 
 int CaffeConvBaseParser::ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType) {
-  // group default 1
-  int group = 0;
   if (convParam.has_group()) {
-    group = convParam.group();
+    return convParam.group();
   } else {
-    layerType == "ConvolutionDepthwise" ? (group = convParam.num_output()) : (group = DEFAULT_CONV_GROUP);
+    return layerType == "ConvolutionDepthwise" ? static_cast<int>(convParam.num_output()) : 1;
   }
-  return group;
 }
 
 int CaffeConvBaseParser::ParseChannelOut(const caffe::ConvolutionParameter &convParam, int32_t *channelOut) {
-  MS_ASSERT(channelOut != nullptr);
+  if (channelOut == nullptr) {
+    MS_LOG(ERROR) << "channelOut is null";
+    return RET_NULL_PTR;
+  }
   if (!convParam.has_num_output()) {
     MS_LOG(ERROR) << "Parse num_output for failed.";
     return RET_ERROR;
@@ -169,7 +162,11 @@ int CaffeConvBaseParser::ParseChannelOut(const caffe::ConvolutionParameter &conv
 
 STATUS CaffeConvBaseParser::ParseWeight(const caffe::LayerParameter &weight,
                                         std::vector<schema::TensorT *> *weightVec) {
-  // Layer must have Filter
+  if (weightVec == nullptr) {
+    MS_LOG(ERROR) << "op is null";
+    return RET_NULL_PTR;
+  }
+
   if (weight.blobs_size() == 0) {
     MS_LOG(ERROR) << "No filter data in layer " << weight.name().c_str();
     return RET_ERROR;
@@ -182,8 +179,7 @@ STATUS CaffeConvBaseParser::ParseWeight(const caffe::LayerParameter &weight,
   }
   weightVec->push_back(filter);
 
-  // parse bias
-  const caffe::ConvolutionParameter convParam = weight.convolution_param();
+  const caffe::ConvolutionParameter &convParam = weight.convolution_param();
   if (convParam.bias_term() && weight.blobs_size() > 1) {
     auto bias = ConvertWeight(weight.blobs(1));
     if (bias == nullptr) {
@@ -192,7 +188,7 @@ STATUS CaffeConvBaseParser::ParseWeight(const caffe::LayerParameter &weight,
     }
 
     std::vector<int32_t> shape = bias->dims;
-    if (shape.size() != CAFFE_CONV_BIAS_DIM_NUM) {
+    if (shape.size() != 1) {
       MS_LOG(ERROR) << "Bias dim-num of layer " << weight.name().c_str() << " is not supported";
       return RET_ERROR;
     }
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.h
index 6313ef90c4..a0c53233e5 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.h
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.h
@@ -26,23 +26,23 @@ namespace mindspore {
 namespace lite {
 class CaffeConvBaseParser {
  public:
-  CaffeConvBaseParser() {}
+  CaffeConvBaseParser() = default;
 
-  virtual ~CaffeConvBaseParser() {}
+  virtual ~CaffeConvBaseParser() = default;
 
-  STATUS ParsePads(const caffe::ConvolutionParameter &conv_param, std::vector *pad);
+  static STATUS ParsePads(const caffe::ConvolutionParameter &conv_param, std::vector *pad);
 
-  STATUS ParseStrides(const caffe::ConvolutionParameter &conv_param, std::vector *stride);
+  static STATUS ParseStrides(const caffe::ConvolutionParameter &conv_param, std::vector *stride);
 
-  STATUS ParseDilations(const caffe::ConvolutionParameter &conv_param, std::vector *dilation);
+  static STATUS ParseDilations(const caffe::ConvolutionParameter &conv_param, std::vector *dilation);
 
-  STATUS ParseKernels(const caffe::ConvolutionParameter &conv_param, std::vector *kernel);
+  static STATUS ParseKernels(const caffe::ConvolutionParameter &conv_param, std::vector *kernel);
 
-  int ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType);
+  static int ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType);
 
-  int ParseChannelOut(const caffe::ConvolutionParameter &convParam, int32_t *channelOut);
+  static int ParseChannelOut(const caffe::ConvolutionParameter &convParam, int32_t *channelOut);
 
-  STATUS ParseWeight(const caffe::LayerParameter &weight, std::vector *weightVec);
+  static STATUS ParseWeight(const caffe::LayerParameter &weight, std::vector *weightVec);
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git
a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc index d1e054ef0c..0e8149980e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc @@ -54,7 +54,10 @@ STATUS CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, schema: STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeConvolutionParser"; - + if (weightVec == nullptr) { + MS_LOG(ERROR) << "weightVec is null"; + return RET_NULL_PTR; + } if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -73,11 +76,10 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c attr->format = schema::Format_NCHW; - const caffe::ConvolutionParameter convParam = proto.convolution_param(); - CaffeConvBaseParser convParser; + const caffe::ConvolutionParameter &convParam = proto.convolution_param(); // parse pad std::vector pad(4, 0); - auto status = convParser.ParsePads(convParam, &pad); + auto status = CaffeConvBaseParser::ParsePads(convParam, &pad); if (status != RET_OK) { MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed"; return RET_ERROR; @@ -89,7 +91,7 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c // parse stride std::vector stride(2, 0); - status = convParser.ParseStrides(convParam, &stride); + status = CaffeConvBaseParser::ParseStrides(convParam, &stride); if (status != RET_OK) { MS_LOG(ERROR) << "ParseStrides for " << proto.name().c_str() << " failed"; return RET_ERROR; @@ -99,7 +101,7 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c // parse dilation std::vector dilation(2, 0); - status = convParser.ParseDilations(convParam, &dilation); + status = CaffeConvBaseParser::ParseDilations(convParam, &dilation); if (status != RET_OK) { MS_LOG(ERROR) << "ParseDilations for " << proto.name().c_str() << " failed"; return RET_ERROR; @@ -109,7 +111,7 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c // parse kernel std::vector kernel(2, 0); - status = convParser.ParseKernels(convParam, &kernel); + status = CaffeConvBaseParser::ParseKernels(convParam, &kernel); if (status != RET_OK) { MS_LOG(ERROR) << "ParseKernels for " << proto.name().c_str() << " failed"; return RET_ERROR; @@ -118,8 +120,8 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c attr->kernelW = kernel[1]; attr->hasBias = convParam.bias_term(); - attr->group = convParser.ParseGroup(convParam, proto.type()); - auto ret = convParser.ParseChannelOut(convParam, &(attr->channelOut)); + attr->group = CaffeConvBaseParser::ParseGroup(convParam, proto.type()); + auto ret = CaffeConvBaseParser::ParseChannelOut(convParam, &(attr->channelOut)); if (ret != RET_OK) { MS_LOG(ERROR) << "conv channel out failed"; return RET_ERROR; @@ -128,7 +130,6 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c if (weightBlob.has_shape()) { attr->channelIn = weightBlob.shape().dim(1) * attr->group; } else { - // get shape information from Blob parameters(caffe proto v1) attr->channelIn = weightBlob.channels() * attr->group; } attr->padMode = schema::PadMode_CAFFE; @@ -143,7 +144,7 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter 
&proto, const c return RET_ERROR; } - status = convParser.ParseWeight(weight, weightVec); + status = CaffeConvBaseParser::ParseWeight(weight, weightVec); if (status != RET_OK) { MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed"; return RET_ERROR; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h index 4074397a94..50a411b345 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h @@ -27,13 +27,13 @@ namespace lite { class CaffeConvolutionParser : public CaffeNodeParser { public: CaffeConvolutionParser() : CaffeNodeParser("convolution") {} - ~CaffeConvolutionParser() = default; + ~CaffeConvolutionParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; private: - STATUS ParseGroupConvolution(schema::CNodeT *op, schema::Conv2DT *attr); + static STATUS ParseGroupConvolution(schema::CNodeT *op, schema::Conv2DT *attr); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc index 5d47c37e49..e36d1e2059 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc @@ -17,13 +17,15 @@ #include "tools/converter/parser/caffe/caffe_crop_parser.h" #include -const int32_t CROP_AXIS = 2; - namespace mindspore { namespace lite { STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeCropParser"; + if (weightVec == nullptr) { + MS_LOG(ERROR) << "weightVec is null"; + return RET_NULL_PTR; + } if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -41,22 +43,23 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto, const caffe::L } if (!proto.has_crop_param()) { - attr->axis = CROP_AXIS; + attr->axis = 2; std::vector offsets(2, 0); attr->offsets = offsets; } else { - const caffe::CropParameter cropParam = proto.crop_param(); + const caffe::CropParameter &cropParam = proto.crop_param(); if (cropParam.has_axis()) { if (cropParam.axis() == -1) { MS_LOG(WARNING) << "axis with -1 may lead to calculation errors when input less than 4 dims."; } attr->axis = cropParam.axis(); } else { - attr->axis = CROP_AXIS; + attr->axis = 2; } if (cropParam.offset_size() != 0) { std::vector offsets; + offsets.reserve(cropParam.offset_size()); for (int i = 0; i < cropParam.offset_size(); i++) { offsets.push_back(cropParam.offset(i)); } diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h index 21d108d0dc..8714070431 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeCropParser : public CaffeNodeParser { public: CaffeCropParser() : CaffeNodeParser("crop") {} - ~CaffeCropParser() = default; + ~CaffeCropParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git 
a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc index c64d9d476d..5f6325f5d5 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc @@ -54,7 +54,10 @@ STATUS CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, sch STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeDeconvolutionParser"; - + if (weightVec == nullptr) { + MS_LOG(ERROR) << "weightVec is null"; + return RET_NULL_PTR; + } if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -69,11 +72,10 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const attr->format = schema::Format::Format_NCHW; - const caffe::ConvolutionParameter convParam = proto.convolution_param(); - CaffeConvBaseParser convParser; + const caffe::ConvolutionParameter &convParam = proto.convolution_param(); // parse pad std::vector pad(4, 0); - auto status = convParser.ParsePads(convParam, &pad); + auto status = CaffeConvBaseParser::ParsePads(convParam, &pad); if (status != RET_OK) { MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed"; return RET_ERROR; @@ -85,7 +87,7 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const // parse stride std::vector stride(2, 0); - status = convParser.ParseStrides(convParam, &stride); + status = CaffeConvBaseParser::ParseStrides(convParam, &stride); if (status != RET_OK) { MS_LOG(ERROR) << "ParseStrides for " << proto.name().c_str() << " failed"; return RET_ERROR; @@ -95,7 +97,7 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const // parse dilation std::vector dilation(2, 0); - status = convParser.ParseDilations(convParam, &dilation); + status = CaffeConvBaseParser::ParseDilations(convParam, &dilation); if (status != RET_OK) { MS_LOG(ERROR) << "ParseDilations for " << proto.name().c_str() << " failed"; return RET_ERROR; @@ -105,7 +107,7 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const // parse kernel std::vector kernel(2, 0); - status = convParser.ParseKernels(convParam, &kernel); + status = CaffeConvBaseParser::ParseKernels(convParam, &kernel); if (status != RET_OK) { MS_LOG(ERROR) << "ParseKernels for " << proto.name().c_str() << " failed"; return RET_ERROR; @@ -114,8 +116,8 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const attr->kernelW = kernel[1]; attr->hasBias = convParam.bias_term(); - attr->group = convParser.ParseGroup(convParam, proto.type()); - auto ret = convParser.ParseChannelOut(convParam, &(attr->channelOut)); + attr->group = CaffeConvBaseParser::ParseGroup(convParam, proto.type()); + auto ret = CaffeConvBaseParser::ParseChannelOut(convParam, &(attr->channelOut)); if (ret != RET_OK) { MS_LOG(ERROR) << "deconv channel get failed"; return RET_ERROR; @@ -127,7 +129,6 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const else attr->channelIn = weightBlob.shape().dim(1) * attr->group; } else { - // get shape information from Blob parameters(caffe proto v1) attr->channelIn = weightBlob.num() * attr->group; } attr->padMode = schema::PadMode_CAFFE; @@ -142,7 +143,7 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, 
const return RET_ERROR; } - status = convParser.ParseWeight(weight, weightVec); + status = CaffeConvBaseParser::ParseWeight(weight, weightVec); if (status != RET_OK) { MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed"; return RET_ERROR; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h index ed0042e6fd..56707f75da 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h @@ -27,13 +27,13 @@ namespace lite { class CaffeDeconvolutionParser : public CaffeNodeParser { public: CaffeDeconvolutionParser() : CaffeNodeParser("deconvolution") {} - ~CaffeDeconvolutionParser() = default; + ~CaffeDeconvolutionParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; private: - STATUS ParseGroupDeconvolution(schema::CNodeT *op, schema::DeConv2DT *attr); + static STATUS ParseGroupDeconvolution(schema::CNodeT *op, schema::DeConv2DT *attr); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc index 99fb5d148e..041bb7cdcf 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc @@ -18,9 +18,6 @@ #include #include -const int ELTWISE_MIN_INPUT_SIZE = 2; -const float ELTWISE_SUM_COEFF_EPSILON = 1e-5; - namespace mindspore { namespace lite { STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, @@ -42,13 +39,13 @@ STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe return RET_NULL_PTR; } - if (proto.bottom_size() < ELTWISE_MIN_INPUT_SIZE) { + if (proto.bottom_size() < 2) { MS_LOG(ERROR) << "Eltwise Op " << proto.name() << " need at least 2 inputs,but input size is " << proto.bottom_size(); return RET_ERROR; } - const caffe::EltwiseParameter eltwiseParam = proto.eltwise_param(); + const caffe::EltwiseParameter &eltwiseParam = proto.eltwise_param(); if (eltwiseParam.coeff_size() != 0 && eltwiseParam.coeff_size() != proto.bottom_size()) { MS_LOG(ERROR) << "Coeff size(" << eltwiseParam.coeff_size() << ") check fail, Eltwise Layer takes one coefficient per bottom blob."; @@ -60,8 +57,8 @@ STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe return RET_ERROR; } - if (eltwiseParam.coeff_size() != 0 && (fabs(eltwiseParam.coeff(0) - 1) > ELTWISE_SUM_COEFF_EPSILON || - fabs(eltwiseParam.coeff(1) - 1) > ELTWISE_SUM_COEFF_EPSILON)) { + if (eltwiseParam.coeff_size() != 0 && + (std::fabs(eltwiseParam.coeff(0) - 1) > 1e-5 || std::fabs(eltwiseParam.coeff(1) - 1) > 1e-5)) { MS_LOG(ERROR) << "Eltwise only support coefficient 1 for summation now."; return RET_ERROR; } diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h index 7b1b4739c6..c210666fcb 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeEltwiseParser : public CaffeNodeParser { public: CaffeEltwiseParser() : CaffeNodeParser("eltwise") {} 
- ~CaffeEltwiseParser() = default; + ~CaffeEltwiseParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.cc index c1eb314291..6e70ec6583 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.cc @@ -39,7 +39,7 @@ STATUS CaffeEluParser::Parse(const caffe::LayerParameter &proto, const caffe::La } if (proto.has_elu_param()) { - const caffe::ELUParameter eluParameter = proto.elu_param(); + const caffe::ELUParameter &eluParameter = proto.elu_param(); if (eluParameter.has_alpha()) { attr->alpha = eluParameter.alpha(); } diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.h index 25f0faff67..fc544b57f3 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeEluParser : public CaffeNodeParser { public: CaffeEluParser() : CaffeNodeParser("elu") {} - ~CaffeEluParser() = default; + ~CaffeEluParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc index b6bf0425a9..e23a52104e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc @@ -39,7 +39,7 @@ STATUS CaffeExpParser::Parse(const caffe::LayerParameter &proto, const caffe::La return RET_NULL_PTR; } - const caffe::ExpParameter exp_param = proto.exp_param(); + const caffe::ExpParameter &exp_param = proto.exp_param(); if (exp_param.has_base()) { attr->base = exp_param.base(); } else { diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h index fd8d50404a..940e59bde0 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeExpParser : public CaffeNodeParser { public: CaffeExpParser() : CaffeNodeParser("exp") {} - ~CaffeExpParser() = default; + ~CaffeExpParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h index b9f0e0e499..60668a41b6 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeFlattenParser : public CaffeNodeParser { public: CaffeFlattenParser() : CaffeNodeParser("flatten") {} - ~CaffeFlattenParser() = default; + ~CaffeFlattenParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git 
a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc index cfdf0831fc..8c5fd191a6 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc @@ -22,6 +22,10 @@ namespace lite { STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeInnerProductParser"; + if (weightVec == nullptr) { + MS_LOG(ERROR) << "weightVec is null"; + return RET_NULL_PTR; + } if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -38,7 +42,7 @@ STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const return RET_NULL_PTR; } - const caffe::InnerProductParameter innerProductParam = proto.inner_product_param(); + const caffe::InnerProductParameter &innerProductParam = proto.inner_product_param(); if (!innerProductParam.has_num_output()) { MS_LOG(ERROR) << "InnerProduct Parse num_output for " << proto.name().c_str() << " failed."; return RET_ERROR; @@ -62,8 +66,6 @@ STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const MS_LOG(ERROR) << "InnerProduct No filter data in layer " << weight.name().c_str(); return RET_ERROR; } - - // parse filter auto filter = ConvertWeight(weight.blobs(0)); if (filter == nullptr) { MS_LOG(ERROR) << "InnerProduct parse weight for layer " << weight.name().c_str() << " failed"; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h index 09710b2034..dd06c42ae5 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeInnerProductParser : public CaffeNodeParser { public: CaffeInnerProductParser() : CaffeNodeParser("innerproduct") {} - ~CaffeInnerProductParser() = default; + ~CaffeInnerProductParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc index c5c877cfbb..42bf574c89 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_inspector.cc @@ -47,12 +47,12 @@ STATUS CaffeInspector::ParseInput() { } STATUS CaffeInspector::FindInputAndOutput() { - for (auto iter : layerBottoms) { + for (const auto &iter : layerBottoms) { if (layerTops.find(iter) == layerTops.end()) { graphInput.insert(iter); } } - for (auto iter : layerTops) { + for (const auto &iter : layerTops) { if (layerBottoms.find(iter) == layerBottoms.end()) { graphOutput.insert(iter); } @@ -62,7 +62,7 @@ STATUS CaffeInspector::FindInputAndOutput() { STATUS CaffeInspector::SetTopsAndBottoms() { for (int32_t i = 0; i < net.layer_size(); i++) { - caffe::LayerParameter &layer = const_cast(net.layer(i)); + auto &layer = const_cast(net.layer(i)); if (layer.top_size() == 1 && layer.bottom_size() == 1 && layer.top(0) == layer.bottom(0)) { continue; } diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc 
b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc index be11c2e2c7..d607002e16 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc @@ -38,7 +38,7 @@ STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe: return RET_NULL_PTR; } - const caffe::InterpParameter interpParam = proto.interp_param(); + const caffe::InterpParameter &interpParam = proto.interp_param(); if (interpParam.has_height()) { int64_t height = interpParam.height(); if (height < 0) { diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h index bab33099f2..decf497470 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeInterpParser : public CaffeNodeParser { public: CaffeInterpParser() : CaffeNodeParser("Interp") {} - ~CaffeInterpParser() = default; + ~CaffeInterpParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc index 6da25d56ac..15bf3e32d2 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc @@ -23,6 +23,11 @@ namespace mindspore { namespace lite { schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) { std::unique_ptr weight = std::make_unique(); + if (weight == nullptr) { + MS_LOG(ERROR) << "new weight failed"; + return nullptr; + } + weight->format = schema::Format::Format_NCHW; std::vector shapeVec; ConvertShape(proto, &shapeVec); @@ -32,8 +37,7 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) { // cal Weight num int count = 1; - for (size_t i = 0; i < shapeVec.size(); ++i) { - int dim = shapeVec[i]; + for (int dim : shapeVec) { if (dim <= 0) { MS_LOG(ERROR) << "Convert weight fail, Blob size invalid"; return nullptr; @@ -48,6 +52,7 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) { // get weight std::unique_ptr buf = std::make_unique(count); if (buf == nullptr) { + MS_LOG(ERROR) << "new weight buf failed"; return nullptr; } if (proto.double_data_size() > 0) { @@ -74,6 +79,7 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) { << "blob.data_size:%d" << proto.data_size(); return nullptr; } + weight->data.resize(count * sizeof(float)); const float *data_ptr = proto.data().data(); if (data_ptr == nullptr) { @@ -91,8 +97,12 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) { } STATUS ConvertShape(const caffe::BlobProto &proto, std::vector *shape) { - shape->clear(); + if (shape == nullptr) { + MS_LOG(ERROR) << "shape is null"; + return RET_ERROR; + } + shape->clear(); if (proto.has_num() || proto.has_channels() || proto.has_height() || proto.has_width()) { shape->push_back(proto.num()); shape->push_back(proto.channels()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.cc index 57c978c7ba..e8ffd46749 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.cc +++ 
b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.cc @@ -18,7 +18,7 @@ namespace mindspore { namespace lite { -CaffeNodeParserRegistry::CaffeNodeParserRegistry() {} +CaffeNodeParserRegistry::CaffeNodeParserRegistry() = default; CaffeNodeParserRegistry::~CaffeNodeParserRegistry() { for (auto ite : parsers) { diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc index cc74fe6942..d1a75ba2b2 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc @@ -38,7 +38,7 @@ STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto, const caffe return RET_NULL_PTR; } - const caffe::PermuteParameter permuteParam = proto.permute_param(); + const caffe::PermuteParameter &permuteParam = proto.permute_param(); const int num_order_dims = permuteParam.order_size(); attr->perm.resize(num_order_dims); for (int i = 0; i < num_order_dims; ++i) { diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h index c4df48b66e..7d43ff3d3b 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffePermuteParser : public CaffeNodeParser { public: CaffePermuteParser() : CaffeNodeParser("Permute") {} - ~CaffePermuteParser() = default; + ~CaffePermuteParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc index f570bbb6d0..de3329283a 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc @@ -17,9 +17,6 @@ #include "tools/converter/parser/caffe/caffe_pooling_parser.h" #include -const uint32_t INNERPRODUCT_WINDOW_DEFAULT_VALUE = 0; -const uint32_t INNERPRODUCT_PAD_DEFAULT_VALUE = 0; - namespace mindspore { namespace lite { STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, @@ -43,7 +40,7 @@ STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto, const caffe attr->format = schema::Format::Format_NCHW; - const caffe::PoolingParameter poolingParam = proto.pooling_param(); + const caffe::PoolingParameter &poolingParam = proto.pooling_param(); auto status = ParsePads(poolingParam, attr.get()); if (status != RET_OK) { MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed"; @@ -68,15 +65,12 @@ STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto, const caffe return RET_ERROR; } - // default roundMode RoundMode_CEIL attr->roundMode = schema::RoundMode_CEIL; if (poolingParam.has_round_mode()) { if (poolingParam.round_mode() == caffe::PoolingParameter_RoundMode_FLOOR) { attr->roundMode = schema::RoundMode_FLOOR; } else if (poolingParam.round_mode() == caffe::PoolingParameter_RoundMode_CEIL) { attr->roundMode = schema::RoundMode_CEIL; - } else { - MS_ASSERT(false); } } attr->padMode = schema::PadMode_CAFFE; @@ -127,8 +121,8 @@ STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingPa MS_LOG(ERROR) << "With 
Global_pooling: true Filter size cannot specified"; return RET_ERROR; } - attr->windowH = INNERPRODUCT_WINDOW_DEFAULT_VALUE; - attr->windowW = INNERPRODUCT_WINDOW_DEFAULT_VALUE; + attr->windowH = 0; + attr->windowW = 0; attr->global = true; } else { if (poolingParam.has_kernel_size() == (poolingParam.has_kernel_h() || poolingParam.has_kernel_w())) { @@ -157,7 +151,7 @@ STATUS CaffePoolingParser::ParsePoolingMode(const caffe::PoolingParameter &pooli } else if (poolingParam.pool() == caffe::PoolingParameter::AVE) { attr->poolingMode = schema::PoolMode_MEAN_POOLING; } else { - MS_LOG(ERROR) << "Pooling param`s PoolingMode is not MAX either AVE. MindSpore support MAX and AVE only."; + MS_LOG(ERROR) << "MindSpore support MAX and AVE PoolingMode only."; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h index 72079e4ef8..9978f55e31 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h @@ -26,18 +26,18 @@ namespace lite { class CaffePoolingParser : public CaffeNodeParser { public: CaffePoolingParser() : CaffeNodeParser("pooling") {} - ~CaffePoolingParser() = default; + ~CaffePoolingParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; - STATUS ParsePads(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); + static STATUS ParsePads(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); - STATUS ParseStrides(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); + static STATUS ParseStrides(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); - STATUS ParseWindows(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); + static STATUS ParseWindows(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); - STATUS ParsePoolingMode(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); + static STATUS ParsePoolingMode(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc index 6efc5cecb8..ceb704870c 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc @@ -18,10 +18,6 @@ #include #include -static const float CAFFE_POWER_DEFAULT_POWER = 1.0; -static const float CAFFE_POWER_DEFAULT_SCALE = 1.0; -static const float CAFFE_POWER_DEFAULT_SHIFT = 0.0; - namespace mindspore { namespace lite { STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, @@ -43,15 +39,15 @@ STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto, const caffe:: return RET_NULL_PTR; } - const caffe::PowerParameter powerParam = proto.power_param(); + const caffe::PowerParameter &powerParam = proto.power_param(); if (proto.has_power_param()) { - attr->power = powerParam.has_power() ? powerParam.power() : CAFFE_POWER_DEFAULT_POWER; - attr->scale = powerParam.has_scale() ? powerParam.scale() : CAFFE_POWER_DEFAULT_SCALE; - attr->shift = powerParam.has_shift() ? 
powerParam.shift() : CAFFE_POWER_DEFAULT_SHIFT; + attr->power = powerParam.has_power() ? powerParam.power() : 1.0; + attr->scale = powerParam.has_scale() ? powerParam.scale() : 1.0; + attr->shift = powerParam.has_shift() ? powerParam.shift() : 0.0; } else { - attr->power = CAFFE_POWER_DEFAULT_POWER; - attr->scale = CAFFE_POWER_DEFAULT_SCALE; - attr->shift = CAFFE_POWER_DEFAULT_SHIFT; + attr->power = 1.0; + attr->scale = 1.0; + attr->shift = 0.0; } op->name = proto.name(); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h index acc66aa783..9c5c8b9c3e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffePowerParser : public CaffeNodeParser { public: CaffePowerParser() : CaffeNodeParser("power") {} - ~CaffePowerParser() = default; + ~CaffePowerParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc index 16757783c6..dc75a3928d 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc @@ -22,6 +22,10 @@ namespace lite { STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffePReluParser"; + if (weightVec == nullptr) { + MS_LOG(ERROR) << "weightVec is null"; + return RET_NULL_PTR; + } if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -38,7 +42,7 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto, const caffe:: return RET_NULL_PTR; } - const caffe::PReLUParameter pReluParam = proto.prelu_param(); + const caffe::PReLUParameter &pReluParam = proto.prelu_param(); if (pReluParam.has_channel_shared()) { attr->channelShared = pReluParam.channel_shared(); } else { @@ -49,7 +53,6 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto, const caffe:: MS_LOG(ERROR) << "PRelu No blobs data in layer " << proto.name().c_str(); return RET_ERROR; } - auto slope = ConvertWeight(weight.blobs(0)); if (slope == nullptr) { MS_LOG(ERROR) << "CaffePRelu convert slope for layer " << weight.name().c_str() << " failed."; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h index 9010d41d86..f921e55233 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffePReluParser : public CaffeNodeParser { public: CaffePReluParser() : CaffeNodeParser("pRelu") {} - ~CaffePReluParser() = default; + ~CaffePReluParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc index 3d2854975f..23334f8558 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc +++ 
b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc @@ -39,7 +39,7 @@ STATUS CaffeReduceParser::Parse(const caffe::LayerParameter &proto, const caffe: return RET_NULL_PTR; } - const caffe::ReductionParameter reduce_param = proto.reduction_param(); + const caffe::ReductionParameter &reduce_param = proto.reduction_param(); if (reduce_param.has_operation()) { switch (reduce_param.operation()) { case caffe::ReductionParameter_ReductionOp_MEAN: @@ -72,6 +72,7 @@ STATUS CaffeReduceParser::Parse(const caffe::LayerParameter &proto, const caffe: } attr->reduceToEnd = true; attr->keepDims = false; + op->name = proto.name(); op->primitive->value.type = schema::PrimitiveType_Reduce; op->primitive->value.value = attr.release(); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.h index b9af143069..2ccc69879a 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeReduceParser : public CaffeNodeParser { public: CaffeReduceParser() : CaffeNodeParser("reduce") {} - ~CaffeReduceParser() = default; + ~CaffeReduceParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc index 94b51335f7..becae39f93 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc @@ -39,8 +39,6 @@ STATUS CaffeRelu6Parser::Parse(const caffe::LayerParameter &proto, const caffe:: } attr->type = schema::ActivationType_RELU6; - // relu: negative_slope = 0, no parameter; - // leakyrelu: negative_slope != 0; if (proto.has_relu_param() && proto.relu_param().has_negative_slope()) { float negative_slope = proto.relu_param().negative_slope(); if (0 != negative_slope) { diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h index 8a9d8e22e9..09c620b03b 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h @@ -25,7 +25,7 @@ namespace lite { class CaffeRelu6Parser : public CaffeNodeParser { public: CaffeRelu6Parser() : CaffeNodeParser("relu6") {} - ~CaffeRelu6Parser() = default; + ~CaffeRelu6Parser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc index 44c6eb0247..cd3331345e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc @@ -40,7 +40,7 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto, const caffe attr->format = schema::Format::Format_NCHW; - const caffe::ReshapeParameter reshapeParam = proto.reshape_param(); + const caffe::ReshapeParameter &reshapeParam = proto.reshape_param(); if (!reshapeParam.has_shape()) { MS_LOG(ERROR) << "Reshape has no shape info, ret fail"; return RET_ERROR; diff 
--git a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h index 5b69d0a2bd..6de6736e41 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeReshapeParser : public CaffeNodeParser { public: CaffeReshapeParser() : CaffeNodeParser("reshape") {} - ~CaffeReshapeParser() = default; + ~CaffeReshapeParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc index 9c58531c9f..d3a8576f2b 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc @@ -17,14 +17,15 @@ #include "tools/converter/parser/caffe/caffe_scale_parser.h" #include -const int32_t NCHW_DIM_C = 1; -const int32_t DIM_DEFAULT_SIZE = 4; - namespace mindspore { namespace lite { STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) { MS_LOG(DEBUG) << "parse CaffeScaleParser"; + if (weightVec == nullptr) { + MS_LOG(ERROR) << "weightVec is null"; + return RET_NULL_PTR; + } if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -47,10 +48,10 @@ STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe:: return RET_ERROR; } - const caffe::ScaleParameter scaleParam = weight.scale_param(); - int axis = NCHW_DIM_C; + const caffe::ScaleParameter &scaleParam = weight.scale_param(); + int axis = 1; if (scaleParam.has_axis()) { - uint32_t axis_index = NCHW_DIM_C; + uint32_t axis_index = 1; if (GetAxisIndex(scaleParam.axis(), &axis_index)) { MS_LOG(ERROR) << "scale get axis failed for layer " << weight.name().c_str(); return RET_ERROR; @@ -93,7 +94,7 @@ STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe:: } STATUS CaffeScaleParser::GetAxisIndex(const int32_t &axis, uint32_t *axis_index) { - if (axis < -DIM_DEFAULT_SIZE || axis >= DIM_DEFAULT_SIZE) { + if (axis < -4 || axis >= 4) { MS_LOG(ERROR) << "Scale axis value(" << axis << ") is not correct"; return RET_ERROR; } @@ -102,7 +103,7 @@ STATUS CaffeScaleParser::GetAxisIndex(const int32_t &axis, uint32_t *axis_index) MS_LOG(WARNING) << "axis with -1 may lead to calculation errors when input less than 4 dims."; } - *axis_index = (axis + DIM_DEFAULT_SIZE) % DIM_DEFAULT_SIZE; + *axis_index = (axis + 4) % 4; return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h index 4896707329..10dcc708da 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h @@ -26,12 +26,12 @@ namespace lite { class CaffeScaleParser : public CaffeNodeParser { public: CaffeScaleParser() : CaffeNodeParser("scale") {} - ~CaffeScaleParser() = default; + ~CaffeScaleParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; - STATUS GetAxisIndex(const int32_t &axis, uint32_t *axis_index); + static STATUS 
GetAxisIndex(const int32_t &axis, uint32_t *axis_index); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h index 6715ca3a70..d2e32523b3 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeSigmoidParser : public CaffeNodeParser { public: CaffeSigmoidParser() : CaffeNodeParser("sigmoid") {} - ~CaffeSigmoidParser() = default; + ~CaffeSigmoidParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.cc index 24b992e122..976718874e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.cc @@ -33,7 +33,6 @@ STATUS CaffeSliceParser::Parse(const caffe::LayerParameter &proto, const caffe:: } std::unique_ptr attr = std::make_unique(); - if (attr == nullptr) { MS_LOG(ERROR) << "new op failed"; return RET_NULL_PTR; @@ -56,12 +55,12 @@ STATUS CaffeSliceParser::Parse(const caffe::LayerParameter &proto, const caffe:: attr->sizeSplits = size_splits; } - // The axis along which to slice -- may be negative to index from the end (e.g., -1 for the last axis). if (slice_param.has_axis()) { attr->splitDim = slice_param.axis(); } else if (slice_param.has_slice_dim()) { attr->splitDim = slice_param.slice_dim(); } + op->name = proto.name(); op->primitive->value.type = schema::PrimitiveType_Split; op->primitive->value.value = attr.release(); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.h index 8cd3820bf9..2500a9fe31 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeSliceParser : public CaffeNodeParser { public: CaffeSliceParser() : CaffeNodeParser("slice") {} - ~CaffeSliceParser() = default; + ~CaffeSliceParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc index 0e10625179..9c6e0e9258 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc @@ -17,8 +17,6 @@ #include "tools/converter/parser/caffe/caffe_softmax_parser.h" #include -static const int32_t CAFFE_SOFTMAX_DEFAULT_AXIS = 1; - namespace mindspore { namespace lite { STATUS CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, @@ -42,11 +40,11 @@ STATUS CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto, const caffe if (proto.has_softmax_param() && proto.softmax_param().has_axis()) { if (proto.softmax_param().axis() == -1) { - MS_LOG(ERROR) << "axis with -1 may lead to calculation errors when input less than 4 dims."; + MS_LOG(DEBUG) << "axis with -1 may lead to calculation errors 
when input less than 4 dims."; } attr->axis = proto.softmax_param().axis(); } else { - attr->axis = CAFFE_SOFTMAX_DEFAULT_AXIS; + attr->axis = 1; } op->name = proto.name(); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h index 2f3d734218..796df196b1 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeSoftmaxParser : public CaffeNodeParser { public: CaffeSoftmaxParser() : CaffeNodeParser("softmax") {} - ~CaffeSoftmaxParser() = default; + ~CaffeSoftmaxParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h index 6f6201829d..03f7b32d28 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeTanhParser : public CaffeNodeParser { public: CaffeTanhParser() : CaffeNodeParser("tanh") {} - ~CaffeTanhParser() = default; + ~CaffeTanhParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc index 7efe74e9fb..4cd060fd10 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc @@ -39,7 +39,7 @@ STATUS CaffeTileParser::Parse(const caffe::LayerParameter &proto, const caffe::L return RET_NULL_PTR; } - const caffe::TileParameter tile_param = proto.tile_param(); + const caffe::TileParameter &tile_param = proto.tile_param(); std::vector dims; std::vector multiples; dims.clear(); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h index 5a18d04933..b9dee3c7d4 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h @@ -26,7 +26,7 @@ namespace lite { class CaffeTileParser : public CaffeNodeParser { public: CaffeTileParser() : CaffeNodeParser("tile") {} - ~CaffeTileParser() = default; + ~CaffeTileParser() override = default; STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op, std::vector *weightVec) override; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc index 173ddde1b9..e72ffa3626 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc @@ -18,6 +18,7 @@ #include "tools/converter/parser/onnx/onnx_tensor_parser.h" #include #include +#include namespace mindspore { namespace lite { @@ -266,7 +267,6 @@ STATUS OnnxEltwiseParser::Parse(const onnx::GraphProto &onnx_graph, const onnx:: return RET_NULL_PTR; } - // there is no Prod in onnx if (onnx_node.op_type() == "Sum") { 
attr->mode = schema::EltwiseMode_SUM; } else if (onnx_node.op_type() == "Max") { diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h index fe50727f1f..b6533f5188 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h @@ -50,13 +50,6 @@ class OnnxDivParser : public OnnxNodeParser { STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; -class OnnxMeanParser : public OnnxNodeParser { - public: - OnnxMeanParser() : OnnxNodeParser("Mean") {} - ~OnnxMeanParser() override = default; - STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; -}; - class OnnxPowParser : public OnnxNodeParser { public: OnnxPowParser() : OnnxNodeParser("Power") {} diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc index 69ee22f8b7..66742a5206 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc @@ -38,7 +38,6 @@ STATUS OnnxBiasAddParser::Parse(const onnx::GraphProto &onnx_graph, const onnx:: return RET_NULL_PTR; } - // use channel dim as axis attr->axis = {1}; op->primitive->value.type = schema::PrimitiveType_BiasAdd; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.cc index 4210d73550..ee3cb1c2c6 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.cc @@ -52,7 +52,7 @@ STATUS OnnxConstantOfShapeParser::Parse(const onnx::GraphProto &onnx_graph, cons attr->value.push_back(static_cast(onnx_node_attr.i())); break; case onnx::AttributeProto_AttributeType_TENSOR: { - auto tensor = onnx_node_attr.t(); + const auto &tensor = onnx_node_attr.t(); auto ret = GetTensorDataFromOnnx(tensor, &attr->value, &attr->dataType); if (ret != RET_OK) { return ret; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc index 59058ae13a..13e3f0130b 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc @@ -67,7 +67,7 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod MS_LOG(ERROR) << "new op failed"; return RET_NULL_PTR; } - // set default params + attr->strideH = 1; attr->strideW = 1; attr->dilateH = 1; @@ -75,6 +75,7 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod attr->group = 1; attr->padMode = schema::PadMode_NOTSET; attr->format = schema::Format::Format_NCHW; + // set opdef each attr params for (const auto &onnx_node_attr : onnx_node.attribute()) { if (onnx_node_attr.name() == "group") { @@ -157,8 +158,10 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod auto iter = std::find_if((*nodeIter).attribute().begin(), (*nodeIter).attribute().end(), [](const onnx::AttributeProto &attr) { return attr.name() == "shape"; }); if (iter != (*nodeIter).attribute().end()) { - MS_ASSERT(iter->ints().begin() != 
nullptr); - MS_ASSERT(iter->ints().end() != nullptr); + if (iter->ints().begin() == nullptr || iter->ints().end() == nullptr) { + MS_LOG(ERROR) << "dims insert failed"; + return RET_ERROR; + } dims.insert(dims.begin(), iter->ints().begin(), iter->ints().end()); } attr->channelOut = dims[0]; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h b/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h index a04a3e456c..3344ba7b13 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h @@ -28,7 +28,7 @@ class OnnxConverter : public Converter { public: OnnxConverter(); - ~OnnxConverter() = default; + ~OnnxConverter() override = default; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc index 729bfcc645..b966a8fd5d 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc @@ -71,14 +71,12 @@ STATUS OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N return RET_NULL_PTR; } - // set default params attr->padMode = schema::PadMode_NOTSET; attr->group = 1; attr->strideW = 1; attr->strideH = 1; attr->dilateW = 1; attr->dilateH = 1; - // set opdef each attr params for (const auto &onnx_node_attr : onnx_node.attribute()) { if (onnx_node_attr.name() == "group") { attr->group = static_cast(onnx_node_attr.i()); @@ -144,10 +142,14 @@ STATUS OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N } std::vector weight_shape; auto size = (*nodeIter).dims_size(); + weight_shape.reserve(size); for (int i = 0; i < size; ++i) { weight_shape.emplace_back((*nodeIter).dims(i)); } - MS_ASSERT(weight_shape.size() == 4); + if (weight_shape.size() != 4) { + MS_LOG(ERROR) << "weight_shape.size() should be 4, but is " << weight_shape.size(); + return RET_ERROR; + } attr->channelIn = weight_shape[0]; attr->channelOut = weight_shape[1] * attr->group; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h index 18f20573de..a0a77e8058 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h @@ -31,7 +31,7 @@ class OnnxDeConvParser : public OnnxNodeParser { STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; private: - bool ParseGroupDeConvolution(const std::unique_ptr &attr, schema::CNodeT *op); + static bool ParseGroupDeConvolution(const std::unique_ptr &attr, schema::CNodeT *op); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc index fdce9d8253..46fb4288ec 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc @@ -48,11 +48,10 @@ STATUS OnnxExpandParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N MS_LOG(ERROR) << "can not find node: " << onnx_expand_power; return RET_ERROR; } - const int64_t *dataPtr = nullptr; for (const auto &attrPower : nodeIter->attribute()) { if (attrPower.name() == "value") { const auto &t = attrPower.t(); - dataPtr = reinterpret_cast(t.raw_data().data()); + auto *dataPtr = reinterpret_cast(t.raw_data().data()); for (int i = 0; i < t.dims(0); ++i) { dst_shape.emplace_back(dataPtr[i]); } }
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.h index 03cbeb3005..4c2a928864 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.h @@ -25,7 +25,7 @@ namespace lite { class OnnxLpNormParser : public OnnxNodeParser { public: OnnxLpNormParser() : OnnxNodeParser("LpNorm") {} - ~OnnxLpNormParser() = default; + ~OnnxLpNormParser() override = default; STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.cc index 6d4c205fc0..7c755b83ef 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.cc @@ -39,7 +39,7 @@ STATUS OnnxLstmParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod for (const auto &onnx_node_attr : onnx_node.attribute()) { if (onnx_node_attr.name() == "direction") { - auto direction = onnx_node_attr.s(); + const auto &direction = onnx_node_attr.s(); attr->bidirection = direction == "bidirectional"; } } diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h index 30d2db0033..f428bbc0c7 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h @@ -18,6 +18,7 @@ #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_NODE_PARSER_H #include +#include #include #include "google/protobuf/message.h" #include "proto/onnx.pb.h" @@ -29,13 +30,13 @@ namespace mindspore { namespace lite { class OnnxNodeParser { public: - explicit OnnxNodeParser(const std::string nodeName) : name(nodeName) {} + explicit OnnxNodeParser(std::string nodeName) : name(std::move(nodeName)) {} virtual ~OnnxNodeParser() = default; virtual STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) = 0; - STATUS GetTensorDataFromOnnx(const onnx::TensorProto &onnx_tensor, std::vector *value, int *type); + static STATUS GetTensorDataFromOnnx(const onnx::TensorProto &onnx_tensor, std::vector *value, int *type); static STATUS set_opset_version(int version) { opset_version_ = version; @@ -44,9 +45,9 @@ class OnnxNodeParser { static int opset_version() { return opset_version_; } protected: - schema::PadMode GetOnnxPadMode(const onnx::AttributeProto &onnx_node_attr); + static schema::PadMode GetOnnxPadMode(const onnx::AttributeProto &onnx_node_attr); - void Split(const std::string &src_str, std::vector *dst_str, const std::string &chr); + static void Split(const std::string &src_str, std::vector *dst_str, const std::string &chr); const std::string name; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser_registry.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser_registry.cc index ce9b630917..a1e96d5e2f 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser_registry.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser_registry.cc @@ -40,13 +40,6 @@ OnnxNodeParser *OnnxNodeParserRegistry::GetNodeParser(const 
std::string &name) { if (it != parsers.end()) { return it->second; } - /* should not support vague name, otherwise may get wrong parser. ex. PRelu and Relu - for (auto const &i : parsers) { - if (name.find(i.first) != std::string::npos) { - return i.second; - } - } - */ return nullptr; } } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc index ea041867a6..63bce29749 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc @@ -15,7 +15,6 @@ */ #include "tools/converter/parser/onnx/onnx_pool_parser.h" - #include namespace mindspore { diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc index fb0f449b48..2079a900c9 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc @@ -15,7 +15,6 @@ */ #include "tools/converter/parser/onnx/onnx_relu_parser.h" - #include #include #include "securec/include/securec.h" @@ -63,7 +62,6 @@ STATUS OnnxReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod STATUS OnnxPReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) { MS_LOG(DEBUG) << "onnx PReluParser"; - if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -113,7 +111,7 @@ STATUS OnnxPReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No } OnnxNodeRegistrar g_onnxReluParser("Relu", new OnnxReluParser()); -OnnxNodeRegistrar g_onnxLeakyReluParser("LeakyRelu", new OnnxLeakeyReluParser()); +OnnxNodeRegistrar g_onnxLeakyReluParser("LeakyRelu", new OnnxReluParser()); OnnxNodeRegistrar g_onnxPReluParser("PRelu", new OnnxPReluParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h index cc23303ca0..95d4303c41 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h @@ -30,12 +30,6 @@ class OnnxReluParser : public OnnxNodeParser { STATUS Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) override; }; -class OnnxLeakeyReluParser : public OnnxReluParser { - public: - OnnxLeakeyReluParser() : OnnxReluParser() {} - ~OnnxLeakeyReluParser() override = default; -}; - class OnnxPReluParser : public OnnxNodeParser { public: OnnxPReluParser() : OnnxNodeParser("Prelu") {} diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.cc index ef19e9623c..299b3efc31 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.cc @@ -43,7 +43,6 @@ STATUS OnnxTopkParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod attr->k = static_cast(onnx_node_attr.i()); } } - // attr->sorted; op->primitive->value.type = schema::PrimitiveType_TopK; op->primitive->value.value = attr.release(); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc index 2430d5a9cb..5fee208ad8 100644 --- 
a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc @@ -41,13 +41,7 @@ STATUS OnnxTransposeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx attr->conjugate = false; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); - if (attribute_name == "axes") { - attr->perm.resize(onnx_node_attr.ints_size()); - for (int i = 0; i < onnx_node_attr.ints_size(); ++i) { - attr->perm[i] = onnx_node_attr.ints(i); - } - } - if (attribute_name == "perm") { + if (attribute_name == "axes" || attribute_name == "perm") { attr->perm.resize(onnx_node_attr.ints_size()); for (int i = 0; i < onnx_node_attr.ints_size(); ++i) { attr->perm[i] = onnx_node_attr.ints(i); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.cc index 48ab1855c3..7a2a3acad7 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.cc @@ -15,7 +15,6 @@ */ #include "tools/converter/parser/onnx/onnx_upsample_parser.h" - #include namespace mindspore { diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc index 9224d823f8..0b2b264fbd 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.cc @@ -18,7 +18,6 @@ #include #include #include -#include namespace mindspore { namespace lite { @@ -26,6 +25,9 @@ STATUS TfliteActivationParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -71,6 +73,9 @@ STATUS TfliteActivationParser::Parse(TfliteTensorsInfo *tensors_info, } attr->alpha = tflite_attr->alpha; attr->type = schema::ActivationType_LEAKY_RELU; + } else { + MS_LOG(ERROR) << node_name << " hasn't been supported"; + return RET_NOT_FIND_OP; } op->primitive->value.type = schema::PrimitiveType_Activation; @@ -81,12 +86,12 @@ STATUS TfliteActivationParser::Parse(TfliteTensorsInfo *tensors_info, return RET_OK; } -TfliteNodeRegister g_TfliteReluParser("Relu", new TfliteReluParser()); -TfliteNodeRegister g_TfliteRelu6Parser("Relu6", new TfliteRelu6Parser()); -TfliteNodeRegister g_TfliteTanhParser("Tanh", new TfliteTanhParser()); -TfliteNodeRegister g_TfliteSwishParser("Swish", new TfliteSwishParser()); -TfliteNodeRegister g_TfliteHardSwishParser("HardSwish", new TfliteHardSwishParser()); -TfliteNodeRegister g_tfliteLogisticParser("Logistic", new TfliteLogisticParser()); -TfliteNodeRegister g_TfliteLeakyReluParser("LeakyRelu", new TfliteLeakyReluParser()); +TfliteNodeRegister g_tfliteReluParser("Relu", new TfliteActivationParser()); +TfliteNodeRegister g_tfliteRelu6Parser("Relu6", new TfliteActivationParser()); +TfliteNodeRegister g_tfliteTanhParser("Tanh", new TfliteActivationParser()); +TfliteNodeRegister g_tfliteSwishParser("Swish", new TfliteActivationParser()); +TfliteNodeRegister g_tfliteHardSwishParser("HardSwish", new TfliteActivationParser()); +TfliteNodeRegister 
g_tfliteLogisticParser("Logistic", new TfliteActivationParser()); +TfliteNodeRegister g_tfliteLeakyReluParser("LeakyRelu", new TfliteActivationParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h index 908f44491d..6418678c60 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h @@ -34,41 +34,6 @@ class TfliteActivationParser : public TfliteNodeParser { const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; -class TfliteReluParser : public TfliteActivationParser { - public: - TfliteReluParser() : TfliteActivationParser() {} -}; - -class TfliteRelu6Parser : public TfliteActivationParser { - public: - TfliteRelu6Parser() : TfliteActivationParser() {} -}; - -class TfliteTanhParser : public TfliteActivationParser { - public: - TfliteTanhParser() : TfliteActivationParser() {} -}; - -class TfliteLogisticParser : public TfliteActivationParser { - public: - TfliteLogisticParser() : TfliteActivationParser() {} -}; - -class TfliteSwishParser : public TfliteActivationParser { - public: - TfliteSwishParser() : TfliteActivationParser() {} -}; - -class TfliteHardSwishParser : public TfliteActivationParser { - public: - TfliteHardSwishParser() : TfliteActivationParser() {} -}; - -class TfliteLeakyReluParser : public TfliteActivationParser { - public: - TfliteLeakyReluParser() : TfliteActivationParser() {} -}; - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc index e766fb4a52..6c4353b9eb 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc @@ -18,7 +18,6 @@ #include "tools/converter/parser/tflite/tflite_addn_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -26,6 +25,9 @@ STATUS TfliteAddNParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniqu const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteAddNParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -43,11 +45,12 @@ STATUS TfliteAddNParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniqu } attr->N = tflite_subgraph->tensors.size() - 1; + op->primitive->value.type = schema::PrimitiveType_AddN; op->primitive->value.value = attr.release(); - for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc index be78876a39..568a096401 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc +++ 
b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc @@ -25,6 +25,9 @@ STATUS TfliteArgmaxParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteArgmaxParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -48,7 +51,12 @@ STATUS TfliteArgmaxParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni // get axis attr auto axis_idx = tflite_op->inputs[1]; - auto buffer_idx = tflite_subgraph->tensors[axis_idx]->buffer; + auto axis_tensor = tflite_subgraph->tensors[axis_idx].get(); + if (axis_tensor == nullptr) { + MS_LOG(ERROR) << "axis_tensor is null"; + return RET_NULL_PTR; + } + auto buffer_idx = axis_tensor->buffer; auto &buf_data = tflite_model->buffers[buffer_idx]; if (buf_data == nullptr) { MS_LOG(ERROR) << "the buf data is null"; @@ -69,6 +77,6 @@ STATUS TfliteArgmaxParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni return RET_OK; } -TfliteNodeRegister g_TfliteArgmaxParser("Argmax", new TfliteArgmaxParser()); +TfliteNodeRegister g_tfliteArgmaxParser("Argmax", new TfliteArgmaxParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc index 4d87a4e7fb..b1d2846aa5 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc @@ -25,6 +25,9 @@ STATUS TfliteArgminParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteArgminParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -48,7 +51,12 @@ STATUS TfliteArgminParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni // get axis attr auto axis_idx = tflite_op->inputs[1]; - auto buffer_idx = tflite_subgraph->tensors[axis_idx]->buffer; + auto axis_tensor = tflite_subgraph->tensors[axis_idx].get(); + if (axis_tensor == nullptr) { + MS_LOG(ERROR) << "axis_tensor is null"; + return RET_NULL_PTR; + } + auto buffer_idx = axis_tensor->buffer; auto &buf_data = tflite_model->buffers[buffer_idx]; if (buf_data == nullptr) { MS_LOG(ERROR) << "the buf data is null"; @@ -69,6 +77,6 @@ STATUS TfliteArgminParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni return RET_OK; } -TfliteNodeRegister g_TfliteArgminParser("Argmin", new TfliteArgminParser()); +TfliteNodeRegister g_tfliteArgminParser("Argmin", new TfliteArgminParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc index af5637e28a..0b37afe175 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc @@ -18,7 +18,6 @@ #include #include #include -#include namespace mindspore { namespace lite { @@ -26,6 +25,9 @@ STATUS TfliteDoubleInputOpParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr 
&tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -165,11 +167,14 @@ STATUS TfliteDoubleInputOpParser::Parse(TfliteTensorsInfo *tensors_info, } op->primitive->value.type = schema::PrimitiveType_Minimum; op->primitive->value.value = attr.release(); + } else { + MS_LOG(ERROR) << node_name << " hasn't been supported"; + return RET_NOT_FIND_OP; } // set input - for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; @@ -179,6 +184,9 @@ STATUS TfliteSingleInputOpParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -303,6 +311,9 @@ STATUS TfliteSingleInputOpParser::Parse(TfliteTensorsInfo *tensors_info, } op->primitive->value.type = schema::PrimitiveType_Neg; op->primitive->value.value = attr.release(); + } else { + MS_LOG(ERROR) << node_name << " hasn't been supported"; + return RET_NOT_FIND_OP; } AddOpInput(op, tensors_info, tflite_op->inputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); @@ -314,6 +325,9 @@ STATUS TfliteCompareOpParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -381,45 +395,48 @@ STATUS TfliteCompareOpParser::Parse(TfliteTensorsInfo *tensors_info, } op->primitive->value.type = schema::PrimitiveType_LessEqual; op->primitive->value.value = attr.release(); + } else { + MS_LOG(ERROR) << node_name << " hasn't been supported"; + return RET_NOT_FIND_OP; } - for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; } -TfliteNodeRegister g_tfliteAddParser("Add", new TfliteAddParser()); -TfliteNodeRegister g_tfliteSubParser("Sub", new TfliteSubParser()); -TfliteNodeRegister g_TfliteMulParser("Mul", new TfliteMulParser()); -TfliteNodeRegister g_TfliteDivParser("Div", new TfliteDivParser()); -TfliteNodeRegister g_tfliteFloorDivParser("FloorDiv", new TfliteFloorDivParser()); -TfliteNodeRegister g_tfliteFloorModParser("FloorMod", new TfliteFloorModParser()); -TfliteNodeRegister g_tfliteRealDivParser("RealDiv", new 
TfliteRealDivParser()); -TfliteNodeRegister g_TflitePowParser("Pow", new TflitePowParser()); -TfliteNodeRegister g_tfliteSquaredDifferenceParser("SquaredDifference", new TfliteSquaredDifferenceParser()); -TfliteNodeRegister g_TfliteMaximumParser("Maximum", new TfliteMaximumParser()); -TfliteNodeRegister g_TfliteMinimumParser("Minimum", new TfliteMinimumParser()); +TfliteNodeRegister g_tfliteAddParser("Add", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteSubParser("Sub", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteMulParser("Mul", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteDivParser("Div", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteFloorDivParser("FloorDiv", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteFloorModParser("FloorMod", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteRealDivParser("RealDiv", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tflitePowParser("Pow", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteSquaredDifferenceParser("SquaredDifference", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteMaximumParser("Maximum", new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteMinimumParser("Minimum", new TfliteDoubleInputOpParser()); -TfliteNodeRegister g_TfliteAbsParser("Abs", new TfliteAbsParser()); -TfliteNodeRegister g_TfliteExpParser("Exp", new TfliteExpParser()); -TfliteNodeRegister g_TfliteSqrtParser("Sqrt", new TfliteSqrtParser()); -TfliteNodeRegister g_tfliteRsqrtParser("Rsqrt", new TfliteRsqrtParser()); -TfliteNodeRegister g_TfliteSquareParser("Square", new TfliteSquareParser()); -TfliteNodeRegister g_TfliteSinParser("Sin", new TfliteSinParser()); -TfliteNodeRegister g_TfliteCosParser("Cos", new TfliteCosParser()); -TfliteNodeRegister g_TfliteLogParser("Log", new TfliteLogParser()); -TfliteNodeRegister g_tfliteRoundParser("Round", new TfliteRoundParser()); -TfliteNodeRegister g_TfliteCeilParser("Ceil", new TfliteCeilParser()); -TfliteNodeRegister g_tfliteFloorParser("flOOR", new TfliteFloorParser()); -TfliteNodeRegister g_tfliteNegParser("Neg", new TfliteNegParser()); +TfliteNodeRegister g_tfliteAbsParser("Abs", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteExpParser("Exp", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteSqrtParser("Sqrt", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteRsqrtParser("Rsqrt", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteSquareParser("Square", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteSinParser("Sin", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteCosParser("Cos", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteLogParser("Log", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteRoundParser("Round", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteCeilParser("Ceil", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteFloorParser("flOOR", new TfliteSingleInputOpParser()); +TfliteNodeRegister g_tfliteNegParser("Neg", new TfliteSingleInputOpParser()); -TfliteNodeRegister g_tfliteEqualParser("Equal", new TfliteEqualParser()); -TfliteNodeRegister g_tfliteNotEqualParser("NotEqual", new TfliteNotEqualParser()); -TfliteNodeRegister g_tfliteGreaterEParser("Greater", new TfliteGreaterParser()); -TfliteNodeRegister g_tfliteGreaterEqualParser("GreaterEqual", new TfliteGreaterEqualParser()); -TfliteNodeRegister g_tfliteLessParser("Less", new TfliteLessParser()); 
-TfliteNodeRegister g_tfliteLessEqualParser("LessEqual", new TfliteLessEqualParser()); +TfliteNodeRegister g_tfliteEqualParser("Equal", new TfliteCompareOpParser()); +TfliteNodeRegister g_tfliteNotEqualParser("NotEqual", new TfliteCompareOpParser()); +TfliteNodeRegister g_tfliteGreaterEParser("Greater", new TfliteCompareOpParser()); +TfliteNodeRegister g_tfliteGreaterEqualParser("GreaterEqual", new TfliteCompareOpParser()); +TfliteNodeRegister g_tfliteLessParser("Less", new TfliteCompareOpParser()); +TfliteNodeRegister g_tfliteLessEqualParser("LessEqual", new TfliteCompareOpParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h index c9d20cddf2..b99b5fbeb9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h @@ -34,61 +34,6 @@ class TfliteDoubleInputOpParser : public TfliteNodeParser { const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; -class TfliteAddParser : public TfliteDoubleInputOpParser { - public: - TfliteAddParser() : TfliteDoubleInputOpParser() {} -}; - -class TfliteSubParser : public TfliteDoubleInputOpParser { - public: - TfliteSubParser() : TfliteDoubleInputOpParser() {} -}; - -class TfliteMulParser : public TfliteDoubleInputOpParser { - public: - TfliteMulParser() : TfliteDoubleInputOpParser() {} -}; - -class TfliteDivParser : public TfliteDoubleInputOpParser { - public: - TfliteDivParser() : TfliteDoubleInputOpParser() {} -}; - -class TfliteFloorDivParser : public TfliteDoubleInputOpParser { - public: - TfliteFloorDivParser() : TfliteDoubleInputOpParser() {} -}; - -class TfliteFloorModParser : public TfliteDoubleInputOpParser { - public: - TfliteFloorModParser() : TfliteDoubleInputOpParser() {} -}; - -class TfliteSquaredDifferenceParser : public TfliteDoubleInputOpParser { - public: - TfliteSquaredDifferenceParser() : TfliteDoubleInputOpParser() {} -}; - -class TfliteRealDivParser : public TfliteDoubleInputOpParser { - public: - TfliteRealDivParser() : TfliteDoubleInputOpParser() {} -}; - -class TflitePowParser : public TfliteDoubleInputOpParser { - public: - TflitePowParser() : TfliteDoubleInputOpParser() {} -}; - -class TfliteMaximumParser : public TfliteDoubleInputOpParser { - public: - TfliteMaximumParser() : TfliteDoubleInputOpParser() {} -}; - -class TfliteMinimumParser : public TfliteDoubleInputOpParser { - public: - TfliteMinimumParser() : TfliteDoubleInputOpParser() {} -}; - class TfliteSingleInputOpParser : public TfliteNodeParser { public: TfliteSingleInputOpParser() : TfliteNodeParser("node_name") {} @@ -98,66 +43,6 @@ class TfliteSingleInputOpParser : public TfliteNodeParser { const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; -class TfliteAbsParser : public TfliteSingleInputOpParser { - public: - TfliteAbsParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteExpParser : public TfliteSingleInputOpParser { - public: - TfliteExpParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteSqrtParser : public TfliteSingleInputOpParser { - public: - TfliteSqrtParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteSquareParser : public TfliteSingleInputOpParser { - public: - TfliteSquareParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteSinParser : public TfliteSingleInputOpParser { - public: - TfliteSinParser() : 
TfliteSingleInputOpParser() {} -}; - -class TfliteCosParser : public TfliteSingleInputOpParser { - public: - TfliteCosParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteRsqrtParser : public TfliteSingleInputOpParser { - public: - TfliteRsqrtParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteLogParser : public TfliteSingleInputOpParser { - public: - TfliteLogParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteRoundParser : public TfliteSingleInputOpParser { - public: - TfliteRoundParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteCeilParser : public TfliteSingleInputOpParser { - public: - TfliteCeilParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteFloorParser : public TfliteSingleInputOpParser { - public: - TfliteFloorParser() : TfliteSingleInputOpParser() {} -}; - -class TfliteNegParser : public TfliteSingleInputOpParser { - public: - TfliteNegParser() : TfliteSingleInputOpParser() {} -}; - class TfliteCompareOpParser : public TfliteNodeParser { public: TfliteCompareOpParser() : TfliteNodeParser("node_name") {} @@ -166,36 +51,6 @@ class TfliteCompareOpParser : public TfliteNodeParser { const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; - -class TfliteEqualParser : public TfliteCompareOpParser { - public: - TfliteEqualParser() : TfliteCompareOpParser() {} -}; - -class TfliteNotEqualParser : public TfliteCompareOpParser { - public: - TfliteNotEqualParser() : TfliteCompareOpParser() {} -}; - -class TfliteGreaterParser : public TfliteCompareOpParser { - public: - TfliteGreaterParser() : TfliteCompareOpParser() {} -}; - -class TfliteGreaterEqualParser : public TfliteCompareOpParser { - public: - TfliteGreaterEqualParser() : TfliteCompareOpParser() {} -}; - -class TfliteLessParser : public TfliteCompareOpParser { - public: - TfliteLessParser() : TfliteCompareOpParser() {} -}; - -class TfliteLessEqualParser : public TfliteCompareOpParser { - public: - TfliteLessEqualParser() : TfliteCompareOpParser() {} -}; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc index 4a8cf9e2f0..69a01c54ad 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc @@ -27,6 +27,9 @@ STATUS TfliteBatchToSpaceParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -44,6 +47,9 @@ STATUS TfliteBatchToSpaceParser::Parse(TfliteTensorsInfo *tensors_info, MS_LOG(DEBUG) << "parse TfliteBatchToSpaceParser"; } else if (std::strcmp(node_name, "BatchToSpaceND") == 0) { MS_LOG(DEBUG) << "parse TfliteBatchToSpaceNDParser"; + } else { + MS_LOG(ERROR) << node_name << " hasn't been supported"; + return RET_NOT_FIND_OP; } std::unique_ptr attr = std::make_unique(); @@ -70,6 +76,6 @@ STATUS TfliteBatchToSpaceParser::Parse(TfliteTensorsInfo *tensors_info, } TfliteNodeRegister g_tfliteBatchToSpaceParser("BatchToSpace", new TfliteBatchToSpaceParser()); -TfliteNodeRegister g_TfliteBatchToSpaceNDParser("BatchToSpaceND", new TfliteBatchToSpaceNDParser()); 
+TfliteNodeRegister g_tfliteBatchToSpaceNDParser("BatchToSpaceND", new TfliteBatchToSpaceParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h index a5c8c86201..71d32f2531 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h @@ -33,11 +33,6 @@ class TfliteBatchToSpaceParser : public TfliteNodeParser { const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; - -class TfliteBatchToSpaceNDParser : public TfliteBatchToSpaceParser { - public: - TfliteBatchToSpaceNDParser() : TfliteBatchToSpaceParser() {} -}; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc index 51977aae0c..0c49c600a7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc @@ -18,7 +18,6 @@ #include "tools/converter/parser/tflite/tflite_broadcast_to_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -27,6 +26,9 @@ STATUS TfliteBroadcastToParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteBroadcastToParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc index 6c7dabbd36..cd561c1e81 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_cast_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteCastParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniqu const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteCastParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc index 1ae7041dbd..8b9027478f 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_concat_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteConcatParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteConcatParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + 
MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -52,8 +54,8 @@ STATUS TfliteConcatParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni op->primitive->value.type = schema::PrimitiveType_Concat; op->primitive->value.value = attr.release(); - for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc index 1006d7d48c..4345f02732 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_conv_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteConvParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniqu const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteConvParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc index 61a7af2582..ba22e6b79f 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc @@ -18,7 +18,6 @@ #include #include #include -#include "flatbuffers/flatbuffers.h" #include "flatbuffers/flexbuffers.h" namespace mindspore { @@ -212,6 +211,9 @@ STATUS TfliteCustomParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteCustomParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -222,8 +224,14 @@ STATUS TfliteCustomParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni return RET_NULL_PTR; } const auto &custom_attr = tflite_op->custom_options; - const auto &opcode_index = tflite_op->opcode_index; - const auto &custom_type = tflite_model->operator_codes[opcode_index]->custom_code; + const auto opcode_index = tflite_op->opcode_index; + const auto &operator_code = tflite_model->operator_codes[opcode_index]; + if (operator_code == nullptr) { + MS_LOG(ERROR) << "operator_code is null"; + return RET_NULL_PTR; + } + const auto &custom_type = operator_code->custom_code; + int status = RET_OK; if (custom_type == "TFLite_Detection_PostProcess") { status = DetectPostProcess(custom_attr, op, tflite_op); @@ -254,11 +262,12 @@ STATUS TfliteCustomParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni if (status != RET_OK) { return status; } - for (size_t i = 0; i < tflite_op->inputs.size(); ++i) { - AddOpInput(op, tensors_info, 
tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } - for (size_t i = 0; i < tflite_op->outputs.size(); ++i) { - AddOpOutput(op, tensors_info, tflite_op->outputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int output : tflite_op->outputs) { + AddOpOutput(op, tensors_info, output, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } return status; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h index 90386cec63..b1ad78dee5 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h @@ -23,8 +23,7 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore { -namespace lite { +namespace mindspore::lite { class TfliteCustomParser : public TfliteNodeParser { public: TfliteCustomParser() : TfliteNodeParser("Custom") {} @@ -33,41 +32,40 @@ class TfliteCustomParser : public TfliteNodeParser { const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; - STATUS DetectPostProcess(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS DetectPostProcess(const std::vector &custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); - STATUS AudioSpectrogram(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS AudioSpectrogram(const std::vector &custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); - STATUS Mfcc(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS Mfcc(const std::vector &custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); - STATUS Predict(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS Predict(const std::vector &custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); - STATUS Normalize(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS Normalize(const std::vector &custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); - STATUS ExtractFeatures(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS ExtractFeatures(const std::vector &custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); STATUS Rfft(const std::vector &custom_attr, schema::CNodeT *op, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph); - STATUS FftReal(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS FftReal(const std::vector &custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); - STATUS FftImag(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS FftImag(const std::vector &custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); - STATUS Identity(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS Identity(const std::vector 
&custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); - STATUS BatchMatMul(const std::vector &custom_attr, schema::CNodeT *op, - const std::unique_ptr &tflite_op); + static STATUS BatchMatMul(const std::vector &custom_attr, schema::CNodeT *op, + const std::unique_ptr &tflite_op); }; -} // namespace lite -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CUSTOM_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc index f97859bccb..703be87fcc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_deconv_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteDeConvParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse tflite Transpose_Conv parser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc index acb680f614..cb08fede98 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc @@ -18,7 +18,6 @@ #include "tools/converter/parser/tflite/tflite_depth_to_space_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -27,7 +26,9 @@ STATUS TfliteDepthToSpaceParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteDepthToSpaceParser"; - + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc index 947c485434..27ae6c6b7e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_depthwise_conv_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -27,6 +26,9 @@ STATUS TfliteDepthwiseConv2DParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteDepthwiseConv2DParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc index 5bdd03e289..f35b26f631 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc +++ 
b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc @@ -16,7 +16,6 @@ #include "tools/converter/parser/tflite/tflite_dequantize_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteDequantizeParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteDequantizeNParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc index 01a626a1af..c0304af882 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_expand_dims_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -26,6 +25,9 @@ STATUS TfliteExpandDimsParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteExpandDimsParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -49,6 +51,7 @@ STATUS TfliteExpandDimsParser::Parse(TfliteTensorsInfo *tensors_info, attr->dim = dims[0]; op->primitive->value.type = schema::PrimitiveType_ExpandDims; op->primitive->value.value = attr.release(); + AddOpInput(op, tensors_info, tflite_op->inputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc index 9a426f00f1..a27e3f63db 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_fill_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteFillParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniqu const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteFillParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -56,6 +58,6 @@ STATUS TfliteFillParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniqu return RET_OK; } -TfliteNodeRegister g_TfliteFillParser("Fill", new TfliteFillParser()); +TfliteNodeRegister g_tfliteFillParser("Fill", new TfliteFillParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc index 2874c4de7c..92543ac5c3 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc +++ 
b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_fullyconnected_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -27,6 +26,9 @@ STATUS TfliteFullyConnectedParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteFullyConnectedParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -69,6 +71,6 @@ STATUS TfliteFullyConnectedParser::Parse(TfliteTensorsInfo *tensors_info, } TfliteNodeRegister g_tfliteFullyConnectedParser("FullyConnected", new TfliteFullyConnectedParser()); -TfliteNodeRegister g_tfliteFakeQuantParser("FakeQuant", new TfliteFakeQuantParser()); +TfliteNodeRegister g_tfliteFakeQuantParser("FakeQuant", new TfliteFullyConnectedParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h index e9ee93336c..d81fe6a4e8 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h @@ -33,11 +33,6 @@ class TfliteFullyConnectedParser : public TfliteNodeParser { const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; - -class TfliteFakeQuantParser : public TfliteFullyConnectedParser { - public: - TfliteFakeQuantParser() : TfliteFullyConnectedParser() {} -}; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.cc index 8038953583..b74a2205e7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_gather_nd_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteGatherNdParser::Parse(TfliteTensorsInfo *tensors_info, const std::u const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteGatherNdParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -46,8 +48,8 @@ STATUS TfliteGatherNdParser::Parse(TfliteTensorsInfo *tensors_info, const std::u op->primitive->value.type = schema::PrimitiveType_GatherNd; op->primitive->value.value = attr.release(); - for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc index 
eb3247d641..5b00acb82d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_gather_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteGatherParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteGatherParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -52,8 +54,8 @@ STATUS TfliteGatherParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni op->primitive->value.type = schema::PrimitiveType_Gather; op->primitive->value.value = attr.release(); - for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.cc index 05957d9e69..c57e6cac58 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_hashtable_lookup_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -27,6 +26,9 @@ STATUS TfliteHashtableLookupParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteHashtableLookupParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -45,11 +47,11 @@ STATUS TfliteHashtableLookupParser::Parse(TfliteTensorsInfo *tensors_info, op->primitive->value.type = schema::PrimitiveType_HashtableLookup; op->primitive->value.value = attr.release(); - for (size_t i = 0; i < tflite_op->inputs.size(); ++i) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } - for (size_t i = 0; i < tflite_op->outputs.size(); ++i) { - AddOpOutput(op, tensors_info, tflite_op->outputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int output : tflite_op->outputs) { + AddOpOutput(op, tensors_info, output, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc index a177e21dde..617b606423 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc @@ 
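// Several hunks above (gather, gather_nd, hashtable_lookup) replace index loops over
// tflite_op->inputs/outputs with range-based for loops. A standalone sketch of the
// before/after shape, assuming the indices are a std::vector<int32_t> as in the TFLite
// flatbuffer object API; AddOpInput here is a hypothetical stand-in for the real helper:
#include <cstdint>
#include <iostream>
#include <vector>

void AddOpInput(int idx) { std::cout << "input tensor " << idx << "\n"; }

int main() {
  std::vector<int32_t> inputs = {3, 7, 12};

  // before: index loop
  for (size_t i = 0; i < inputs.size(); i++) {
    AddOpInput(inputs[i]);
  }

  // after: range-based loop, as in the patch
  for (int input : inputs) {
    AddOpInput(input);
  }
  return 0;
}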
-18,7 +18,6 @@ #include "tools/converter/parser/tflite/tflite_l2norm_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -26,6 +25,9 @@ STATUS TfliteL2NormParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteL2NormParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -49,7 +51,6 @@ STATUS TfliteL2NormParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni op->primitive->value.type = schema::PrimitiveType_L2Norm; op->primitive->value.value = attr.release(); - // set input and output AddOpInput(op, tensors_info, tflite_op->inputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc index 5157a16857..1d417f0020 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc @@ -18,13 +18,15 @@ #include #include #include -#include namespace mindspore { namespace lite { STATUS TfliteLogicalParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -65,17 +67,20 @@ STATUS TfliteLogicalParser::Parse(TfliteTensorsInfo *tensors_info, const std::un } op->primitive->value.type = schema::PrimitiveType_LogicalOr; op->primitive->value.value = attr.release(); + } else { + MS_LOG(ERROR) << node_name << " hasn't been supported"; + return RET_NOT_FIND_OP; } - for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; } -TfliteNodeRegister g_TfliteLogicalAndParser("LogicalAnd", new TfliteLogicalAndParser()); -TfliteNodeRegister g_TfliteLogicalNotParser("LogicalNot", new TfliteLogicalNotParser()); -TfliteNodeRegister g_TfliteLogicalOrParser("LogicalOr", new TfliteLogicalOrParser()); +TfliteNodeRegister g_tfliteLogicalAndParser("LogicalAnd", new TfliteLogicalParser()); +TfliteNodeRegister g_tfliteLogicalNotParser("LogicalNot", new TfliteLogicalParser()); +TfliteNodeRegister g_tfliteLogicalOrParser("LogicalOr", new TfliteLogicalParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h index 45b45bebe1..6740a775b6 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h @@ -14,8 
+14,8 @@ * limitations under the License. */ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_AND_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_AND_PARSER_H +#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_LOGICAL_PARSER_H +#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_LOGICAL_PARSER_H #include #include @@ -33,22 +33,7 @@ class TfliteLogicalParser : public TfliteNodeParser { const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; - -class TfliteLogicalAndParser : public TfliteLogicalParser { - public: - TfliteLogicalAndParser() : TfliteLogicalParser() {} -}; - -class TfliteLogicalNotParser : public TfliteLogicalParser { - public: - TfliteLogicalNotParser() : TfliteLogicalParser() {} -}; - -class TfliteLogicalOrParser : public TfliteLogicalParser { - public: - TfliteLogicalOrParser() : TfliteLogicalParser() {} -}; } // namespace lite } // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_LOGICAL_AND_PARSER_H +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_LOGICAL_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc index 09bde50e35..b0fe0909c7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_lrn_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteLRNParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteLRNParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.cc index dd7e3fa038..5d08a82d20 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_lsh_projection_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -26,6 +25,9 @@ STATUS TfliteLshProjectionParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteLshProjectionParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -56,8 +58,8 @@ STATUS TfliteLshProjectionParser::Parse(TfliteTensorsInfo *tensors_info, op->primitive->value.type = schema::PrimitiveType_LshProjection; op->primitive->value.value = attr.release(); - for (size_t i = 0; i < tflite_op->inputs.size(); ++i) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], 
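// The logical-parser hunks drop the three empty subclasses and instead register the base
// TfliteLogicalParser under each op name; the parser dispatches on the node name at Parse
// time. A simplified stand-in for that registration pattern (the real TfliteNodeRegister
// and registry types are only partially visible in this patch):
#include <iostream>
#include <map>
#include <string>

struct NodeParser {
  virtual ~NodeParser() = default;
};
struct LogicalParser : NodeParser {};

std::map<std::string, NodeParser *> &Registry() {
  static std::map<std::string, NodeParser *> registry;
  return registry;
}

struct NodeRegister {
  NodeRegister(const std::string &name, NodeParser *parser) { Registry()[name] = parser; }
};

// one class, three op names -- mirrors the patch; the real registry deletes the
// parser objects in its destructor
NodeRegister g_logicalAnd("LogicalAnd", new LogicalParser());
NodeRegister g_logicalNot("LogicalNot", new LogicalParser());
NodeRegister g_logicalOr("LogicalOr", new LogicalParser());

int main() {
  std::cout << "registered parsers: " << Registry().size() << "\n";  // 3
  return 0;
}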
tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc index ca72489a8e..177701e84d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc @@ -32,7 +32,7 @@ TfliteModelParser::TfliteModelParser() = default; TfliteModelParser::~TfliteModelParser() { delete[](this->tfliteModelBuf); } std::unique_ptr TfliteModelParser::ReadTfliteModel(const char *model_path) { - size_t size; + size_t size = 0; tfliteModelBuf = ReadFile(model_path, &size); if (tfliteModelBuf == nullptr) { MS_LOG(ERROR) << "the file buffer is nullptr"; @@ -48,11 +48,20 @@ std::unique_ptr TfliteModelParser::ReadTfliteModel(const char *m STATUS TfliteModelParser::CopyConstTensorData(const std::vector> &tflite_model_buffer, const tflite::TensorT *tflite_tensor, schema::TensorT *tensor) { + MS_ASSERT(tensor != nullptr); + MS_ASSERT(tflite_tensor != nullptr); auto buffer_idx = tflite_tensor->buffer; - if (!tflite_model_buffer[buffer_idx]->data.empty()) { - auto data_size = tflite_model_buffer[buffer_idx]->data.size(); + + const auto &buf = tflite_model_buffer[buffer_idx]; + if (buf == nullptr) { + MS_LOG(ERROR) << "tensor is null"; + return RET_NULL_PTR; + } + + if (!buf->data.empty()) { + auto data_size = buf->data.size(); tensor->data.resize(data_size); - if (memcpy_s(tensor->data.data(), data_size, tflite_model_buffer[buffer_idx]->data.data(), data_size) != EOK) { + if (memcpy_s(tensor->data.data(), data_size, buf->data.data(), data_size) != EOK) { MS_LOG(ERROR) << "memcpy tensor data failed"; return RET_MEMORY_FAILED; } @@ -65,9 +74,20 @@ STATUS TfliteModelParser::CopyConstTensorData(const std::vector &tflite_tensor, schema::TensorT *tensor) { + MS_ASSERT(tensor != nullptr); tensor->quantParams.clear(); + + if (tflite_tensor->quantization == nullptr) { + MS_LOG(ERROR) << "tflite_tensor->quantization is null"; + return; + } for (size_t i = 0; i < tflite_tensor->quantization->scale.size(); i++) { std::unique_ptr quant_param = std::make_unique(); + if (quant_param == nullptr) { + MS_LOG(ERROR) << "quant_param is null"; + return; + } + if (!tflite_tensor->quantization->scale.empty()) { quant_param->scale = tflite_tensor->quantization->scale[i]; } @@ -91,14 +111,27 @@ void TfliteModelParser::SetTensorQuantParam(const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, const QuantType &quant_type, schema::MetaGraphT *sub_graph) { + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); + MS_ASSERT(sub_graph != nullptr); + int idx = 0; int status = RET_OK; NoSupportOp::GetInstance()->SetFmkType("TFLITE"); for (const auto &tflite_op : tflite_subgraph->operators) { - auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code; - auto op_type = GetMSOpType(tflite_op_type); + const auto opcode_index = tflite_op->opcode_index; + const auto &operator_code = tflite_model->operator_codes[opcode_index]; + if (operator_code == nullptr) { + MS_LOG(ERROR) << "operator_code is null"; + return RET_ERROR; + } + auto op_type = GetMSOpType(operator_code->builtin_code); auto op = std::make_unique(); + if (op == nullptr) { + MS_LOG(ERROR) << "op is null"; + return RET_NULL_PTR; + } op->name = op_type + "-" + std::to_string(idx++); op->quantType = quant_type; MS_LOG(INFO) << "parse op: " << op->name.c_str(); @@ 
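// CopyConstTensorData above now fetches the buffer once, rejects a null entry, and only
// then copies its bytes into the destination tensor. A minimal sketch of that flow with
// plain standard-library types (std::copy stands in for the patch's memcpy_s):
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

constexpr int RET_OK = 0;
constexpr int RET_NULL_PTR = -1;

struct BufferT { std::vector<uint8_t> data; };  // stand-in for tflite::BufferT

int CopyConstData(const std::vector<std::unique_ptr<BufferT>> &buffers, size_t buffer_idx,
                  std::vector<uint8_t> *dst) {
  const auto &buf = buffers[buffer_idx];
  if (buf == nullptr) {              // new null check from the patch
    std::cerr << "buffer is null\n";
    return RET_NULL_PTR;
  }
  if (!buf->data.empty()) {          // constant tensor: copy its bytes
    dst->resize(buf->data.size());
    std::copy(buf->data.begin(), buf->data.end(), dst->begin());
  }
  return RET_OK;
}

int main() {
  std::vector<std::unique_ptr<BufferT>> buffers;
  buffers.push_back(std::make_unique<BufferT>());
  buffers.back()->data = {1, 2, 3, 4};
  std::vector<uint8_t> tensor_data;
  int ret = CopyConstData(buffers, 0, &tensor_data);
  std::cout << "ret=" << ret << " bytes=" << tensor_data.size() << "\n";
  return 0;
}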
-114,8 +147,7 @@ STATUS TfliteModelParser::ConvertOp(const std::unique_ptr &tflit status = (status == RET_OK ? status_node : status); if (status_node != RET_OK) { if (status_node == RET_NOT_FIND_OP) { - op_type = - (op_type != "Custom" ? op_type : (tflite_model->operator_codes[tflite_op->opcode_index])->custom_code); + op_type = (op_type != "Custom" ? op_type : operator_code->custom_code); NoSupportOp::GetInstance()->InsertOp(op_type); } else { MS_LOG(ERROR) << "node " << op_type.c_str() << " parser failed"; @@ -136,10 +168,11 @@ STATUS TfliteModelParser::ConvertOp(const std::unique_ptr &tflit STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr &tflite_subgraph, const std::vector> &tflite_model_buffer, schema::MetaGraphT *sub_graph) { + MS_ASSERT(tflite_subgraph != nullptr); + MS_ASSERT(sub_graph != nullptr); std::set output_index; for (const auto &tflite_op : tflite_subgraph->operators) { - for (size_t j = 0; j < tflite_op->outputs.size(); ++j) { - int idx = tflite_op->outputs[j]; + for (int idx : tflite_op->outputs) { if (idx < 0) { idx += tflite_subgraph->tensors.size(); } @@ -152,7 +185,16 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr idx += tflite_subgraph->tensors.size(); } const auto &tflite_tensor = tflite_subgraph->tensors[idx]; + if (tflite_tensor == nullptr) { + MS_LOG(ERROR) << "tflite_tensor is null"; + return RET_NULL_PTR; + } + std::unique_ptr tensor = std::make_unique(); + if (tensor == nullptr) { + MS_LOG(ERROR) << "tensor is null"; + return RET_NULL_PTR; + } tensor->format = tensorsInfo.tensorsFormat[i]; tensor->dataType = GetTfliteDataType(tflite_tensor->type); @@ -170,6 +212,10 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr // add data for const tensor auto &tensor_buffer = tflite_model_buffer.at(tflite_tensor->buffer); + if (tensor_buffer == nullptr) { + MS_LOG(ERROR) << "tensor_buffer is null"; + return RET_NULL_PTR; + } auto isConst = (!tensor_buffer->data.empty()); if (isConst) { int status = CopyConstTensorData(tflite_model_buffer, tflite_tensor.get(), tensor.get()); @@ -209,17 +255,13 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr STATUS TfliteModelParser::GetGraphInfo(const std::unique_ptr &tflite_subgraph, schema::MetaGraphT *sub_graph) { - int id; - + MS_ASSERT(sub_graph != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); // graph input std::vector graph_inputs; for (size_t i = 0; i < tflite_subgraph->inputs.size(); i++) { const int idx = tflite_subgraph->inputs[i]; - if (idx < 0) { - id = idx + tflite_subgraph->tensors.size(); - } else { - id = idx; - } + int id = idx < 0 ? idx + tflite_subgraph->tensors.size() : idx; auto iter = tensorsInfo.tensorsIdMap.find(id); if (iter != tensorsInfo.tensorsIdMap.end()) { graph_inputs.push_back(iter->second); @@ -234,11 +276,7 @@ STATUS TfliteModelParser::GetGraphInfo(const std::unique_ptr std::vector graph_outputs; for (size_t i = 0; i < tflite_subgraph->outputs.size(); i++) { const int idx = tflite_subgraph->outputs[i]; - if (idx < 0) { - id = idx + tflite_subgraph->tensors.size(); - } else { - id = idx; - } + int id = idx < 0 ? 
idx + tflite_subgraph->tensors.size() : idx; auto iter = tensorsInfo.tensorsIdMap.find(id); if (iter != tensorsInfo.tensorsIdMap.end()) { graph_outputs.push_back(iter->second); @@ -252,21 +290,30 @@ STATUS TfliteModelParser::GetGraphInfo(const std::unique_ptr } STATUS TfliteModelParser::ConvertGroupDepthwiseOp(schema::MetaGraphT *sub_graph) { + MS_ASSERT(sub_graph != nullptr); for (auto &op : sub_graph->nodes) { + if (op->primitive == nullptr) { + MS_LOG(ERROR) << "op->primitive is null"; + return RET_NULL_PTR; + } if (op->primitive->value.type == schema::PrimitiveType_DepthwiseConv2D) { auto attr = op->primitive->value.AsDepthwiseConv2D(); + if (attr == nullptr) { + MS_LOG(ERROR) << "attr is null"; + return RET_NULL_PTR; + } if (attr->channelMultiplier > 1) { // get channel attr if (op->inputIndex.empty()) { MS_LOG(ERROR) << "the input of DepthwiseConv2D is null"; return RET_NULL_PTR; } - auto data_id = op->inputIndex[0]; + const auto data_id = op->inputIndex[0]; if (sub_graph->allTensors.size() <= data_id) { MS_LOG(ERROR) << "the number of allTensors is less than " << data_id; return RET_ERROR; } - auto &data_tensor = sub_graph->allTensors.at(data_id); + const auto &data_tensor = sub_graph->allTensors.at(data_id); if (data_tensor == nullptr) { MS_LOG(ERROR) << "the data tensor is null"; return RET_NULL_PTR; @@ -277,6 +324,10 @@ STATUS TfliteModelParser::ConvertGroupDepthwiseOp(schema::MetaGraphT *sub_graph) return RET_NO_CHANGE; } std::unique_ptr conv_attr = std::make_unique(); + if (conv_attr == nullptr) { + MS_LOG(ERROR) << "conv_attr is null"; + return RET_NULL_PTR; + } if (data_shape[3] == 1) { conv_attr->channelIn = data_shape[3]; conv_attr->channelOut = conv_attr->channelIn * attr->channelMultiplier; @@ -336,7 +387,8 @@ STATUS TfliteModelParser::ConvertGroupDepthwiseOp(schema::MetaGraphT *sub_graph) std::unique_ptr TfliteModelParser::ConstructMainGraph( const std::unique_ptr &tflite_model, const QuantType &quant_type) { - if (tflite_model->subgraphs.size() < 1) { + MS_ASSERT(tflite_model != nullptr); + if (tflite_model->subgraphs.empty()) { MS_LOG(ERROR) << "read tflite model main subgraphs failed"; ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_GRAPH_FILE_ERR); return nullptr; @@ -388,6 +440,11 @@ std::unique_ptr TfliteModelParser::ConstructMainGraph( schema::MetaGraphT *TfliteModelParser::ParseToFb(const std::string &model_file, const std::string &weight_file, const QuantType &quant_type) { + if (model_file.empty()) { + MS_LOG(ERROR) << "model_file is empty"; + return nullptr; + } + // load graph auto tflite_model = ReadTfliteModel(model_file.c_str()); if (tflite_model == nullptr) { diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h index b811387b68..0ac93eeeaf 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h @@ -46,10 +46,10 @@ class TfliteModelParser : public ModelParser { protected: std::unique_ptr ReadTfliteModel(const char *model_path); - STATUS CopyConstTensorData(const std::vector> &tflite_model_buffer, - const tflite::TensorT *tflite_tensor, schema::TensorT *tensor); + static STATUS CopyConstTensorData(const std::vector> &tflite_model_buffer, + const tflite::TensorT *tflite_tensor, schema::TensorT *tensor); - void SetTensorQuantParam(const std::unique_ptr &tflite_tensor, schema::TensorT *tensor); + static void SetTensorQuantParam(const std::unique_ptr 
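// GetGraphInfo above collapses the if/else that wrapped negative TFLite tensor indices
// into a single ternary. Sketch of the same normalization, assuming a negative index
// counts back from the end of the subgraph's tensor list:
#include <cstddef>
#include <iostream>
#include <vector>

int NormalizeIndex(int idx, size_t tensor_count) {
  return idx < 0 ? idx + static_cast<int>(tensor_count) : idx;
}

int main() {
  std::vector<int> tensors(10);
  std::cout << NormalizeIndex(-1, tensors.size()) << "\n";  // 9
  std::cout << NormalizeIndex(3, tensors.size()) << "\n";   // 3
  return 0;
}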
&tflite_tensor, schema::TensorT *tensor); STATUS ConvertOp(const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, const QuantType &quant_type, @@ -61,7 +61,7 @@ class TfliteModelParser : public ModelParser { STATUS GetGraphInfo(const std::unique_ptr &tflite_subgraph, schema::MetaGraphT *sub_graph); - STATUS ConvertGroupDepthwiseOp(schema::MetaGraphT *sub_graph); + static STATUS ConvertGroupDepthwiseOp(schema::MetaGraphT *sub_graph); QuantType quantType = QuantType_QUANT_NONE; char *tfliteModelBuf = nullptr; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h index 9521b8600e..0cc6e1bebb 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h @@ -46,7 +46,10 @@ class TfliteNodeParser { return RET_OK; } - void AddOpInput(schema::CNodeT *op, TfliteTensorsInfo *tensors_info, int idx, int total, schema::Format format) { + static void AddOpInput(schema::CNodeT *op, TfliteTensorsInfo *tensors_info, int idx, int total, + schema::Format format) { + MS_ASSERT(op != nullptr); + MS_ASSERT(tensors_info != nullptr); int new_idx = tensors_info->tensorsId.size(); auto iter = tensors_info->tensorsIdMap.find(idx); if (iter != tensors_info->tensorsIdMap.end()) { @@ -62,7 +65,10 @@ class TfliteNodeParser { } } - void AddOpOutput(schema::CNodeT *op, TfliteTensorsInfo *tensors_info, int idx, int total, schema::Format format) { + static void AddOpOutput(schema::CNodeT *op, TfliteTensorsInfo *tensors_info, int idx, int total, + schema::Format format) { + MS_ASSERT(op != nullptr); + MS_ASSERT(tensors_info != nullptr); int new_idx = tensors_info->tensorsId.size(); auto iter = tensors_info->tensorsIdMap.find(idx); if (iter != tensors_info->tensorsIdMap.end()) { @@ -82,10 +88,15 @@ class TfliteNodeParser { STATUS GetTfliteData(const int32_t tensor_index, const std::vector> &tflite_tensors, const std::vector> &tflite_model_buffer, std::vector &attr_data) { + const auto &tensor = tflite_tensors[tensor_index]; + if (tensor == nullptr) { + MS_LOG(ERROR) << "tensor is null"; + return RET_NULL_PTR; + } + int32_t count = 1; - std::for_each(tflite_tensors[tensor_index]->shape.begin(), tflite_tensors[tensor_index]->shape.end(), - [&](int32_t sha) { count *= sha; }); - auto &buf_data = tflite_model_buffer[tflite_tensors[tensor_index]->buffer]; + std::for_each(tensor->shape.begin(), tensor->shape.end(), [&](int32_t sha) { count *= sha; }); + auto &buf_data = tflite_model_buffer[tensor->buffer]; if (buf_data == nullptr) { MS_LOG(ERROR) << "buf_data is null"; return RET_NULL_PTR; @@ -95,7 +106,7 @@ class TfliteNodeParser { MS_LOG(DEBUG) << "data is not a constant"; return RET_NO_CHANGE; } - switch (tflite_tensors[tensor_index]->type) { + switch (tensor->type) { case tflite::TensorType_UINT8: { for (int i = 0; i < count; i++) { uint8_t data = *(static_cast(static_cast(data_ptr))); @@ -145,7 +156,7 @@ class TfliteNodeParser { break; } default: { - MS_LOG(ERROR) << "wrong tensor type"; + MS_LOG(ERROR) << "wrong tensor type : " << tensor->type; return RET_ERROR; } } @@ -154,13 +165,6 @@ class TfliteNodeParser { protected: const std::string &name; - std::map dtype_map = { - {tflite::TensorType_FLOAT64, TypeId::kNumberTypeFloat64}, {tflite::TensorType_FLOAT32, TypeId::kNumberTypeFloat32}, - {tflite::TensorType_FLOAT16, TypeId::kNumberTypeFloat16}, {tflite::TensorType_INT64, TypeId::kNumberTypeInt64}, - 
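// GetTfliteData above now looks the tensor up once, rejects a null entry, and then
// multiplies the shape dimensions to get the element count before reading the raw buffer.
// A small sketch of that count computation (std::accumulate here instead of the patch's
// std::for_each with a lambda; the result is the same):
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int32_t ElementCount(const std::vector<int32_t> &shape) {
  return std::accumulate(shape.begin(), shape.end(), int32_t{1}, std::multiplies<int32_t>());
}

int main() {
  std::vector<int32_t> shape = {1, 4, 4, 3};
  std::cout << "elements: " << ElementCount(shape) << "\n";  // 48
  return 0;
}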
{tflite::TensorType_INT32, TypeId::kNumberTypeInt32}, {tflite::TensorType_INT16, TypeId::kNumberTypeInt16}, - {tflite::TensorType_INT8, TypeId::kNumberTypeInt8}, {tflite::TensorType_UINT8, TypeId::kNumberTypeUInt8}, - {tflite::TensorType_BOOL, TypeId::kNumberTypeBool}, - }; }; } // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.cc index 1087536c8d..b420b99927 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.cc @@ -18,7 +18,7 @@ namespace mindspore { namespace lite { -TfliteNodeParserRegistry::TfliteNodeParserRegistry() {} +TfliteNodeParserRegistry::TfliteNodeParserRegistry() = default; TfliteNodeParserRegistry::~TfliteNodeParserRegistry() { for (auto ite : parsers) { diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc index d685b16705..eb9d2ac4f9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_one_hot_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteOneHotParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteOneHotParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -46,24 +48,23 @@ STATUS TfliteOneHotParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni MS_LOG(ERROR) << "get op: " << op->name << " attr failed"; return RET_NULL_PTR; } - auto axis = tflite_attr->axis; - const auto &tensor = tflite_subgraph->tensors[tflite_op->inputs[0]]; - if (tensor == nullptr) { - MS_LOG(ERROR) << "tensor is null"; + attr->axis = tflite_attr->axis; + + op->primitive = std::make_unique(); + if (op->primitive == nullptr) { + MS_LOG(ERROR) << "op->primitive is null"; return RET_NULL_PTR; } - attr->axis = axis; - op->primitive->value.type = schema::PrimitiveType_OneHot; op->primitive->value.value = attr.release(); - for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; } -TfliteNodeRegister g_TfliteOneHotParser("OneHot", new TfliteOneHotParser()); +TfliteNodeRegister g_tfliteOneHotParser("OneHot", new TfliteOneHotParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc index 38315ff571..3424a58167 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc @@ -18,7 +18,6 @@ #include #include #include -#include 
namespace mindspore { namespace lite { @@ -26,6 +25,9 @@ STATUS TflitePadParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TflitePadParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -74,7 +76,7 @@ STATUS TflitePadParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique return RET_NOT_SUPPORT; } } else { - MS_LOG(ERROR) << "this pad:" << node_name << " hasn't been supported"; + MS_LOG(ERROR) << node_name << " hasn't been supported"; return RET_NOT_FIND_OP; } diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc index a57fb77d91..c7af3f6419 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc @@ -18,13 +18,15 @@ #include #include #include -#include namespace mindspore { namespace lite { STATUS TflitePoolingParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -50,6 +52,9 @@ STATUS TflitePoolingParser::Parse(TfliteTensorsInfo *tensors_info, const std::un } else if (std::strcmp(node_name, "MaxPooling") == 0) { MS_LOG(DEBUG) << "parse TfliteMaxPoolingParser"; attr->poolingMode = schema::PoolMode_MAX_POOLING; + } else { + MS_LOG(ERROR) << node_name << " hasn't been supported"; + return RET_NOT_FIND_OP; } const auto &tflite_attr = tflite_op->builtin_options.AsPool2DOptions(); @@ -63,7 +68,6 @@ STATUS TflitePoolingParser::Parse(TfliteTensorsInfo *tensors_info, const std::un attr->strideH = tflite_attr->stride_h; attr->padMode = GetPadMode(tflite_attr->padding); attr->format = schema::Format::Format_NHWC; - attr->global = false; attr->roundMode = schema::RoundMode_FLOOR; attr->activationType = GetActivationFunctionType(tflite_attr->fused_activation_function); @@ -92,7 +96,7 @@ STATUS TflitePoolingParser::Parse(TfliteTensorsInfo *tensors_info, const std::un return RET_OK; } -TfliteNodeRegister g_tfliteMeanPoolingParser("MeanPooling", new TfliteMeanPoolingParser()); -TfliteNodeRegister g_tfliteMaxPoolingParser("MaxPooling", new TfliteMaxPoolingParser()); +TfliteNodeRegister g_tfliteMeanPoolingParser("MeanPooling", new TflitePoolingParser()); +TfliteNodeRegister g_tfliteMaxPoolingParser("MaxPooling", new TflitePoolingParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h index c066b7ec11..56b17d7635 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h @@ -14,8 +14,8 @@ * limitations under the License. 
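// The pooling (and logical) parsers above gain an explicit "else" branch so an
// unrecognized node name fails with RET_NOT_FIND_OP instead of silently continuing with a
// half-initialized attribute. Standalone sketch of that dispatch shape:
#include <cstring>
#include <iostream>

constexpr int RET_OK = 0;
constexpr int RET_NOT_FIND_OP = -2;

enum class PoolMode { kMean, kMax };

int ParsePoolMode(const char *node_name, PoolMode *mode) {
  if (std::strcmp(node_name, "MeanPooling") == 0) {
    *mode = PoolMode::kMean;
  } else if (std::strcmp(node_name, "MaxPooling") == 0) {
    *mode = PoolMode::kMax;
  } else {
    std::cerr << node_name << " hasn't been supported\n";
    return RET_NOT_FIND_OP;
  }
  return RET_OK;
}

int main() {
  PoolMode mode;
  std::cout << ParsePoolMode("MaxPooling", &mode) << "\n";  // 0
  std::cout << ParsePoolMode("AvgPooling", &mode) << "\n";  // -2 (unsupported)
  return 0;
}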
*/ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MEAN_POOLING_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MEAN_POOLING_PARSER_H +#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_POOLING_PARSER_H +#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_POOLING_PARSER_H #include #include @@ -33,17 +33,7 @@ class TflitePoolingParser : public TfliteNodeParser { const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; - -class TfliteMeanPoolingParser : public TflitePoolingParser { - public: - TfliteMeanPoolingParser() : TflitePoolingParser() {} -}; - -class TfliteMaxPoolingParser : public TflitePoolingParser { - public: - TfliteMaxPoolingParser() : TflitePoolingParser() {} -}; } // namespace lite } // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONV_PARSER_H +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_POOLING_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.cc index b71a5b0898..1026e5ea19 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.cc @@ -18,7 +18,6 @@ #include "tools/converter/parser/tflite/tflite_prelu_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -26,6 +25,9 @@ STATUS TflitePReLUParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TflitePReLUParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc index 0d9910de39..c8d7bb28e0 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc @@ -16,7 +16,6 @@ #include "tools/converter/parser/tflite/tflite_quantize_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -24,6 +23,9 @@ STATUS TfliteQuantizeParser::Parse(TfliteTensorsInfo *tensors_info, const std::u const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteQuantizeNParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc index 90084ba4e9..867482a1db 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_range_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteRangeParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteRangeParser"; + MS_ASSERT(tflite_op != nullptr); + 
MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc index 9b83c05c31..fbc2f86f36 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_rank_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteRankParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniqu const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteRankParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc index bc0f90b75c..c5f9183ba1 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc @@ -18,13 +18,15 @@ #include #include #include -#include namespace mindspore { namespace lite { STATUS TfliteReduceParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -51,7 +53,6 @@ STATUS TfliteReduceParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni std::vector node_name_str; Split(op->name, &node_name_str, "-"); const char *node_name = node_name_str.data()->c_str(); - if (std::strcmp(node_name, "ReduceMax") == 0) { MS_LOG(DEBUG) << "parse TfliteReduceMaxParser"; attr->mode = schema::ReduceMode_ReduceMax; @@ -67,10 +68,9 @@ STATUS TfliteReduceParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni } else if (std::strcmp(node_name, "Mean") == 0) { MS_LOG(DEBUG) << "parse TfliteMeanParser"; attr->mode = schema::ReduceMode_ReduceMean; - } else if (std::strcmp(node_name, "ReduceAny") == 0) { - // attr->mode; - MS_LOG(ERROR) << "ms-lite haven't supported REDUCE_ANY now"; - return RET_NOT_SUPPORT; + } else { + MS_LOG(ERROR) << node_name << " hasn't been supported"; + return RET_NOT_FIND_OP; } if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->axes)) { @@ -86,11 +86,10 @@ STATUS TfliteReduceParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni return RET_OK; } -TfliteNodeRegister g_TfliteSumParser("Sum", new TfliteSumParser()); -TfliteNodeRegister g_TfliteMeanParser("Mean", new TfliteMeanParser()); -TfliteNodeRegister g_TfliteReduceMaxParser("ReduceMax", new TfliteReduceMaxParser()); -TfliteNodeRegister g_TfliteReduceMinParser("ReduceMin", new TfliteReduceMinParser()); -TfliteNodeRegister g_TfliteReduceProdParser("ReduceProd", new TfliteReduceProdParser()); -TfliteNodeRegister g_TfliteReduceAnyParser("ReduceAny", new TfliteReduceAnyParser()); +TfliteNodeRegister g_tfliteSumParser("Sum", new TfliteReduceParser()); +TfliteNodeRegister 
g_tfliteMeanParser("Mean", new TfliteReduceParser()); +TfliteNodeRegister g_tfliteReduceMaxParser("ReduceMax", new TfliteReduceParser()); +TfliteNodeRegister g_tfliteReduceMinParser("ReduceMin", new TfliteReduceParser()); +TfliteNodeRegister g_tfliteReduceProdParser("ReduceProd", new TfliteReduceParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h index d108f95474..a9340b3e1d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h @@ -33,36 +33,6 @@ class TfliteReduceParser : public TfliteNodeParser { const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; - -class TfliteReduceMaxParser : public TfliteReduceParser { - public: - TfliteReduceMaxParser() : TfliteReduceParser() {} -}; - -class TfliteReduceMinParser : public TfliteReduceParser { - public: - TfliteReduceMinParser() : TfliteReduceParser() {} -}; - -class TfliteReduceProdParser : public TfliteReduceParser { - public: - TfliteReduceProdParser() : TfliteReduceParser() {} -}; - -class TfliteSumParser : public TfliteReduceParser { - public: - TfliteSumParser() : TfliteReduceParser() {} -}; - -class TfliteMeanParser : public TfliteReduceParser { - public: - TfliteMeanParser() : TfliteReduceParser() {} -}; - -class TfliteReduceAnyParser : public TfliteReduceParser { - public: - TfliteReduceAnyParser() : TfliteReduceParser() {} -}; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc index 7f467b75ef..baa5b72f21 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc @@ -17,13 +17,15 @@ #include "tools/converter/parser/tflite/tflite_reshape_parser.h" #include #include -#include namespace mindspore { namespace lite { STATUS TfliteReshapeParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); MS_LOG(DEBUG) << "parse TfliteReshapeParser"; if (op == nullptr) { MS_LOG(ERROR) << "op is null"; @@ -75,8 +77,8 @@ STATUS TfliteReshapeParser::Parse(TfliteTensorsInfo *tensors_info, const std::un op->primitive->value.type = schema::PrimitiveType_Reshape; op->primitive->value.value = attr.release(); - for (size_t i = 0; i < tflite_op->inputs.size(); i++) { - AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int input : tflite_op->inputs) { + AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); return RET_OK; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h index 3aa7380b8f..e6eff70a36 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h 
@@ -36,4 +36,4 @@ class TfliteReshapeParser : public TfliteNodeParser { } // namespace lite } // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_ADD_PARSER_H +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_RESHAPE_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc index 72085bc021..b385e5e7f3 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc @@ -25,6 +25,9 @@ namespace lite { STATUS TfliteResizeParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -42,7 +45,7 @@ STATUS TfliteResizeParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni } attr->coordinateTransformMode = schema::CoordinateTransformMode_COMMON; std::vector node_name_str; - Split(op->name.data(), &node_name_str, "-"); + Split(op->name, &node_name_str, "-"); const char *node_name = node_name_str.data()->c_str(); if (std::strcmp(node_name, "ResizeBilinear") == 0) { MS_LOG(DEBUG) << "parse TfliteResizeBilinearParser"; @@ -118,7 +121,7 @@ STATUS TfliteResizeParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni return RET_OK; } -TfliteNodeRegister g_tfliteResizeBilinearParser("ResizeBilinear", new TfliteResizeBilinearParser()); -TfliteNodeRegister g_tfliteResizeNearestNeighborParser("NearestNeighbor", new TfliteResizeNearestNeighborParser()); +TfliteNodeRegister g_tfliteResizeBilinearParser("ResizeBilinear", new TfliteResizeParser()); +TfliteNodeRegister g_tfliteResizeNearestNeighborParser("NearestNeighbor", new TfliteResizeParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h index 8151984eb2..ead33d090a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h @@ -33,16 +33,6 @@ class TfliteResizeParser : public TfliteNodeParser { const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) override; }; - -class TfliteResizeBilinearParser : public TfliteResizeParser { - public: - TfliteResizeBilinearParser() : TfliteResizeParser() {} -}; - -class TfliteResizeNearestNeighborParser : public TfliteResizeParser { - public: - TfliteResizeNearestNeighborParser() : TfliteResizeParser() {} -}; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc index 2eac035f99..d92777d560 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_reverse_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteReverseParser::Parse(TfliteTensorsInfo *tensors_info, const std::un const std::unique_ptr &tflite_model, const std::unique_ptr 
&tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteReverseParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc index 23a958b269..d0f9db6f52 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc @@ -18,7 +18,6 @@ #include "tools/converter/parser/tflite/tflite_reverse_sequence_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -28,6 +27,9 @@ STATUS TfliteReverseSequenceParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteReverseSequenceParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc index a91b32325d..15eb9dc3dc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc @@ -17,8 +17,6 @@ #include "tools/converter/parser/tflite/tflite_scatter_nd_parser.h" #include #include -#include -#include namespace mindspore { namespace lite { @@ -27,6 +25,9 @@ STATUS TfliteScatterNdParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteScatterNdParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -51,8 +52,6 @@ STATUS TfliteScatterNdParser::Parse(TfliteTensorsInfo *tensors_info, op->primitive->value.type = schema::PrimitiveType_ScatterND; op->primitive->value.value = attr.release(); - // in tflite, kIndices = 0, kUpdates = 1, kShape = 2 - // in mslite, kScatterShapeIndex = 0, kScatterIndicesIndex = 1, kScatterUpdateIndex = 2; AddOpInput(op, tensors_info, tflite_op->inputs[2], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); AddOpInput(op, tensors_info, tflite_op->inputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); AddOpInput(op, tensors_info, tflite_op->inputs[1], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); @@ -60,6 +59,6 @@ STATUS TfliteScatterNdParser::Parse(TfliteTensorsInfo *tensors_info, return RET_OK; } -TfliteNodeRegister g_TfliteScatterNdParser("ScatterNd", new TfliteScatterNdParser()); +TfliteNodeRegister g_tfliteScatterNdParser("ScatterNd", new TfliteScatterNdParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc index 58e1f5b9f7..b28b67d369 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc @@ -17,7 +17,6 @@ #include 
"tools/converter/parser/tflite/tflite_shape_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteShapeParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteShapeParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -49,6 +51,6 @@ STATUS TfliteShapeParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq return RET_OK; } -TfliteNodeRegister g_TfliteShapeParser("Shape", new TfliteShapeParser()); +TfliteNodeRegister g_tfliteShapeParser("Shape", new TfliteShapeParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.cc index 22e875a003..89a1a0cac8 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_skip_gram_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteSkipGramParser::Parse(TfliteTensorsInfo *tensors_info, const std::u const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteSkipGramParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -58,6 +60,6 @@ STATUS TfliteSkipGramParser::Parse(TfliteTensorsInfo *tensors_info, const std::u return RET_OK; } -TfliteNodeRegister g_TfliteSkiGramParser("SKipGram", new TfliteSkipGramParser()); +TfliteNodeRegister g_tfliteSkiGramParser("SKipGram", new TfliteSkipGramParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc index 3dffbd6642..e58441a40c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc @@ -17,7 +17,6 @@ #include "tools/converter/parser/tflite/tflite_slice_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -25,6 +24,9 @@ STATUS TfliteSliceParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteSliceParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc index b170a79c43..53a36fc93c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc @@ -24,6 +24,9 @@ STATUS TfliteSoftmaxParser::Parse(TfliteTensorsInfo *tensors_info, const std::un const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, 
schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteSoftmaxParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h index a0e635db33..f5da7a7ec8 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONV_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONV_PARSER_H +#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SOFTMAX_PARSER_H +#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SOFTMAX_PARSER_H #include #include @@ -36,4 +36,4 @@ class TfliteSoftmaxParser : public TfliteNodeParser { }; } // namespace mindspore::lite -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONV_PARSER_H +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SOFTMAX_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc index 0b6043ce27..297c047912 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc @@ -18,7 +18,6 @@ #include "tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -28,6 +27,9 @@ STATUS TfliteSpaceToBatchNDParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteSpaceToBatchNDParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc index b74378dfb0..c139e432b6 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc @@ -18,7 +18,6 @@ #include "tools/converter/parser/tflite/tflite_space_to_depth_parser.h" #include #include -#include namespace mindspore { namespace lite { @@ -27,6 +26,9 @@ STATUS TfliteSpaceToDepthParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteSpaceToDepthParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc index 87fbf30b63..789dd965ed 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc @@ -18,7 +18,6 @@ #include "tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h" 
#include #include -#include namespace mindspore { namespace lite { @@ -27,6 +26,9 @@ STATUS TfliteSparseToDenseParser::Parse(TfliteTensorsInfo *tensors_info, const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteSparseToDenseParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -44,6 +46,7 @@ STATUS TfliteSparseToDenseParser::Parse(TfliteTensorsInfo *tensors_info, } attr->validateIndices = false; + op->primitive->value.type = schema::PrimitiveType_SparseToDense; op->primitive->value.value = attr.release(); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc index 499fdd0683..c38cf5def2 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc @@ -25,6 +25,9 @@ STATUS TfliteSplitParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, schema::CNodeT *op) { MS_LOG(DEBUG) << "parse TfliteSplitParser"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + MS_ASSERT(tflite_subgraph != nullptr); if (op == nullptr) { MS_LOG(ERROR) << "op is null"; return RET_NULL_PTR; @@ -54,6 +57,7 @@ STATUS TfliteSplitParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq return RET_NULL_PTR; } const auto tensor_shape = shape_tensor->shape; + const auto &axis_tensor = tflite_subgraph->tensors[tflite_op->inputs[0]]; if (axis_tensor == nullptr) { MS_LOG(ERROR) << "axis_tensor is null"; @@ -68,6 +72,10 @@ STATUS TfliteSplitParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq return RET_ERROR; } attr->splitDim = axis; + if (num_splits == 0) { + MS_LOG(ERROR) << "Divide-by-zero error!"; + return RET_ERROR; + } if (tensor_shape[axis] % num_splits != 0 && tensor_shape[axis] / num_splits != 0) { MS_LOG(ERROR) << "num_splits can't divide tensor's length at axis " << axis; return RET_ERROR; @@ -83,12 +91,12 @@ STATUS TfliteSplitParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq op->primitive->value.value = attr.release(); AddOpInput(op, tensors_info, tflite_op->inputs[1], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); - for (size_t i = 0; i < tflite_op->outputs.size(); i++) { - AddOpOutput(op, tensors_info, tflite_op->outputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); + for (int output : tflite_op->outputs) { + AddOpOutput(op, tensors_info, output, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC); } return RET_OK; } -TfliteNodeRegister g_TfliteSplitParser("Split", new TfliteSplitParser()); +TfliteNodeRegister g_tfliteSplitParser("Split", new TfliteSplitParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc index 9cbb69e765..b02ccd484b 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc @@ -25,7 +25,9 @@ STATUS TfliteSplitVParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni const std::unique_ptr &tflite_model, const std::unique_ptr &tflite_subgraph, 
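// TfliteSplitParser above now rejects num_splits == 0 before using it as a divisor, so the
// later modulo on the axis length cannot trap. A minimal sketch of that guard (the real
// check also compares the quotient; this keeps only the divide-by-zero part):
#include <iostream>
#include <vector>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = -3;

int CheckSplit(const std::vector<int> &shape, int axis, int num_splits) {
  if (num_splits == 0) {                       // new guard from the patch
    std::cerr << "Divide-by-zero error!\n";
    return RET_ERROR;
  }
  if (shape[axis] % num_splits != 0) {
    std::cerr << "num_splits can't divide tensor's length at axis " << axis << "\n";
    return RET_ERROR;
  }
  return RET_OK;
}

int main() {
  std::vector<int> shape = {1, 8, 8, 4};
  std::cout << CheckSplit(shape, 1, 4) << "\n";  // 0
  std::cout << CheckSplit(shape, 1, 0) << "\n";  // -3
  return 0;
}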
                                  schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteSplitVParser";
-
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -65,7 +67,12 @@ STATUS TfliteSplitVParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni
     MS_LOG(ERROR) << "axis_tensor is null";
     return RET_NULL_PTR;
   }
-  auto axis = *(reinterpret_cast<int32_t *>(tflite_model->buffers[axis_tensor->buffer]->data.data()));
+  const auto &axis_buf = tflite_model->buffers[axis_tensor->buffer];
+  if (axis_buf == nullptr) {
+    MS_LOG(ERROR) << "axis_buf is null";
+    return RET_NULL_PTR;
+  }
+  auto axis = *(reinterpret_cast<int32_t *>(axis_buf->data.data()));
   if (axis < 0) {
     axis += tensor_shape.size();
   }
@@ -79,12 +86,12 @@ STATUS TfliteSplitVParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni
   op->primitive->value.value = attr.release();

   AddOpInput(op, tensors_info, tflite_op->inputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
-  for (size_t i = 0; i < tflite_op->outputs.size(); i++) {
-    AddOpOutput(op, tensors_info, tflite_op->outputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
+  for (int output : tflite_op->outputs) {
+    AddOpOutput(op, tensors_info, output, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   }
   return RET_OK;
 }

-TfliteNodeRegister g_TfliteSplitVParser("SplitV", new TfliteSplitVParser());
+TfliteNodeRegister g_tfliteSplitVParser("SplitV", new TfliteSplitVParser());
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc
index f127f95d77..61de999fc9 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc
@@ -17,7 +17,6 @@
 #include "tools/converter/parser/tflite/tflite_squeeze_parser.h"
 #include
 #include
-#include

 namespace mindspore {
 namespace lite {
@@ -25,6 +24,9 @@ STATUS TfliteSqueezeParser::Parse(TfliteTensorsInfo *tensors_info, const std::un
                                   const std::unique_ptr<tflite::ModelT> &tflite_model,
                                   const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteSqueezeParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -56,6 +58,6 @@ STATUS TfliteSqueezeParser::Parse(TfliteTensorsInfo *tensors_info, const std::un
   return RET_OK;
 }

-TfliteNodeRegister g_TfliteSqueezeParser("Squeeze", new TfliteSqueezeParser());
+TfliteNodeRegister g_tfliteSqueezeParser("Squeeze", new TfliteSqueezeParser());
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc
index f7c5e4f48e..f810b4f611 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc
@@ -17,7 +17,6 @@
 #include "tools/converter/parser/tflite/tflite_stack_parser.h"
 #include
 #include
-#include

 namespace mindspore {
 namespace lite {
@@ -25,6 +24,9 @@ STATUS TfliteStackParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq
                                 const std::unique_ptr<tflite::ModelT> &tflite_model,
                                 const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteStackParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -54,8 +56,8 @@ STATUS TfliteStackParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq
   op->primitive->value.type = schema::PrimitiveType_Stack;
   op->primitive->value.value = attr.release();

-  for (size_t i = 0; i < tflite_op->inputs.size(); i++) {
-    AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
+  for (int input : tflite_op->inputs) {
+    AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   }
   AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   return RET_OK;
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc
index 5437bd9c65..b9dab37019 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc
@@ -17,7 +17,6 @@
 #include "tools/converter/parser/tflite/tflite_strided_slice_parser.h"
 #include
 #include
-#include

 namespace mindspore {
 namespace lite {
@@ -26,6 +25,9 @@ STATUS TfliteStridedSliceParser::Parse(TfliteTensorsInfo *tensors_info,
                                        const std::unique_ptr<tflite::ModelT> &tflite_model,
                                        const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteStridedSliceParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -41,7 +43,6 @@ STATUS TfliteStridedSliceParser::Parse(TfliteTensorsInfo *tensors_info,
     MS_LOG(ERROR) << "new op failed";
     return RET_NULL_PTR;
   }
-
   const auto &tflite_attr = tflite_op->builtin_options.AsStridedSliceOptions();
   if (tflite_attr == nullptr) {
     MS_LOG(ERROR) << "get op: %s attr failed", op->name.c_str();
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc
index 9569d7a838..74a2323d4b 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc
@@ -18,7 +18,6 @@
 #include "tools/converter/parser/tflite/tflite_tile_parser.h"
 #include
 #include
-#include

 namespace mindspore {
 namespace lite {
@@ -26,6 +25,9 @@ STATUS TfliteTileParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniqu
                                const std::unique_ptr<tflite::ModelT> &tflite_model,
                                const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteTileParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc
index a75354df32..0e3ff760f6 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc
@@ -26,6 +26,9 @@ STATUS TfliteTopKV2Parser::Parse(TfliteTensorsInfo *tensors_info, const std::uni
                                  const std::unique_ptr<tflite::ModelT> &tflite_model,
                                  const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteTopKV2Parser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -54,8 +57,8 @@ STATUS TfliteTopKV2Parser::Parse(TfliteTensorsInfo *tensors_info, const std::uni
   op->primitive->value.value = attr.release();

   AddOpInput(op, tensors_info, tflite_op->inputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
-  for (size_t i = 0; i < tflite_op->outputs.size(); i++) {
-    AddOpOutput(op, tensors_info, tflite_op->outputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
+  for (int output : tflite_op->outputs) {
+    AddOpOutput(op, tensors_info, output, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   }
   return RET_OK;
 }
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc
index 9a915bc9dc..4792adb14c 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc
@@ -25,6 +25,9 @@ STATUS TfliteTransposeParser::Parse(TfliteTensorsInfo *tensors_info,
                                     const std::unique_ptr<tflite::ModelT> &tflite_model,
                                     const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteTransposeParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -45,7 +48,6 @@ STATUS TfliteTransposeParser::Parse(TfliteTensorsInfo *tensors_info,
     MS_LOG(ERROR) << "get transpose -> perm failed";
     return RET_ERROR;
   }
-
   attr->conjugate = false;
   op->primitive->value.type = schema::PrimitiveType_Transpose;
   op->primitive->value.value = attr.release();
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc
index 68683e22c4..f6951eb869 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc
@@ -18,7 +18,6 @@
 #include "tools/converter/parser/tflite/tflite_unique_parser.h"
 #include
 #include
-#include

 namespace mindspore {
 namespace lite {
@@ -26,6 +25,9 @@ STATUS TfliteUniqueParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni
                                  const std::unique_ptr<tflite::ModelT> &tflite_model,
                                  const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteUniqueParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -53,8 +55,8 @@ STATUS TfliteUniqueParser::Parse(TfliteTensorsInfo *tensors_info, const std::uni
   op->primitive->value.value = attr.release();

   AddOpInput(op, tensors_info, tflite_op->inputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
-  for (size_t i = 0; i < tflite_op->outputs.size(); i++) {
-    AddOpOutput(op, tensors_info, tflite_op->outputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
+  for (int output : tflite_op->outputs) {
+    AddOpOutput(op, tensors_info, output, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   }
   return RET_OK;
 }
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc
index 59d1da4f45..76bf61d1c3 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc
@@ -18,7 +18,6 @@
 #include "tools/converter/parser/tflite/tflite_unstack_parser.h"
 #include
 #include
-#include

 namespace mindspore {
 namespace lite {
@@ -26,6 +25,9 @@ STATUS TfliteUnstackParser::Parse(TfliteTensorsInfo *tensors_info, const std::un
                                   const std::unique_ptr<tflite::ModelT> &tflite_model,
                                   const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "paser TfliteUnstackParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -54,8 +56,8 @@ STATUS TfliteUnstackParser::Parse(TfliteTensorsInfo *tensors_info, const std::un
   op->primitive->value.value = attr.release();

   AddOpInput(op, tensors_info, tflite_op->inputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
-  for (size_t i = 0; i < tflite_op->outputs.size(); i++) {
-    AddOpOutput(op, tensors_info, tflite_op->outputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
+  for (int output : tflite_op->outputs) {
+    AddOpOutput(op, tensors_info, output, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   }
   return RET_OK;
 }
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
index f8ed0aa10a..01c309a347 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
@@ -176,7 +176,7 @@ size_t GetDataTypeSize(const TypeId &data_type) {
     case TypeId::kNumberTypeFloat32:
       return sizeof(float);
     case TypeId::kNumberTypeFloat16:
-      return sizeof(float) >> 1;
+      return sizeof(float) / 2;
     case TypeId::kNumberTypeInt8:
       return sizeof(int8_t);
     case TypeId::kNumberTypeInt32:
@@ -237,6 +237,11 @@ STATUS getPaddingParam(const std::unique_ptr &tensor, schema::P
 }

 void Split(const std::string &src_str, std::vector<std::string> *dst_str, const std::string &chr) {
+  MS_ASSERT(dst_str != nullptr);
+  if (src_str.empty()) {
+    MS_LOG(ERROR) << "src_str is empty";
+    return;
+  }
   std::string::size_type p1 = 0, p2 = src_str.find(chr);
   while (std::string::npos != p2) {
     dst_str->push_back(src_str.substr(p1, p2 - p1));
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc
index e74977d8fd..d5985a7275 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc
@@ -18,7 +18,6 @@
 #include "tools/converter/parser/tflite/tflite_where_parser.h"
 #include
 #include
-#include

 namespace mindspore {
 namespace lite {
@@ -26,6 +25,9 @@ STATUS TfliteWhereParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq
                                 const std::unique_ptr<tflite::ModelT> &tflite_model,
                                 const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteWhereParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -50,8 +52,8 @@ STATUS TfliteWhereParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq
   op->primitive->value.type = schema::PrimitiveType_Where;
   op->primitive->value.value = attr.release();

-  for (size_t i = 0; i < tflite_op->inputs.size(); i++) {
-    AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
+  for (int input : tflite_op->inputs) {
+    AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   }
   AddOpOutput(op, tensors_info, tflite_op->outputs[0], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   return RET_OK;
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.cc
index 9b01d34656..68aff392e6 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.cc
@@ -25,6 +25,9 @@ STATUS TfliteWhileParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq
                                 const std::unique_ptr<tflite::ModelT> &tflite_model,
                                 const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteWhileParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
@@ -46,18 +49,17 @@ STATUS TfliteWhileParser::Parse(TfliteTensorsInfo *tensors_info, const std::uniq
     MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
     return RET_NULL_PTR;
   }
-
   attr->condSubgraphIndex = tflite_attr->cond_subgraph_index;
   attr->bodySubgraphIndex = tflite_attr->body_subgraph_index;

   op->primitive->value.type = schema::PrimitiveType_While;
   op->primitive->value.value = attr.release();

-  for (size_t i = 0; i < tflite_op->inputs.size(); i++) {
-    AddOpInput(op, tensors_info, tflite_op->inputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
+  for (int input : tflite_op->inputs) {
+    AddOpInput(op, tensors_info, input, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   }
-  for (size_t i = 0; i < tflite_op->outputs.size(); i++) {
-    AddOpOutput(op, tensors_info, tflite_op->outputs[i], tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
+  for (int output : tflite_op->outputs) {
+    AddOpOutput(op, tensors_info, output, tflite_subgraph->tensors.size(), schema::Format::Format_NHWC);
   }
   return RET_OK;
 }
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.h
index 0cd2621ce4..258c0caa28 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.h
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.h
@@ -36,4 +36,4 @@ class TfliteWhileParser : public TfliteNodeParser {
 }  // namespace lite
 }  // namespace mindspore

-#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_WHERE_PARSER_H
+#endif  // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_WHILE_PARSER_H
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc
index eac52201ee..09c02fa032 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc
@@ -18,7 +18,6 @@
 #include "tools/converter/parser/tflite/tflite_zeros_like_parser.h"
 #include
 #include
-#include

 namespace mindspore {
 namespace lite {
@@ -27,6 +26,9 @@ STATUS TfliteZerosLikeParser::Parse(TfliteTensorsInfo *tensors_info,
                                     const std::unique_ptr<tflite::ModelT> &tflite_model,
                                     const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, schema::CNodeT *op) {
   MS_LOG(DEBUG) << "parse TfliteZerosLikeParser";
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
+  MS_ASSERT(tflite_subgraph != nullptr);
   if (op == nullptr) {
     MS_LOG(ERROR) << "op is null";
     return RET_NULL_PTR;
diff --git a/mindspore/lite/tools/schema_gen/schema_gen.h b/mindspore/lite/tools/schema_gen/schema_gen.h
index 631947e01c..1a22f7e0b3 100644
--- a/mindspore/lite/tools/schema_gen/schema_gen.h
+++ b/mindspore/lite/tools/schema_gen/schema_gen.h
@@ -22,7 +22,7 @@ namespace mindspore::lite {
 class SchemaGenFlags : public virtual FlagParser {
  public:
   SchemaGenFlags() { AddFlag(&SchemaGenFlags::export_path_, "exportPath", "schema define export path", "."); }
-  ~SchemaGenFlags() = default;
+  ~SchemaGenFlags() override = default;

  public:
   std::string export_path_ = ".";
diff --git a/mindspore/lite/tools/schema_gen/schema_type_def.h b/mindspore/lite/tools/schema_gen/schema_type_def.h
index 73b82b52fe..a8a26138b1 100644
--- a/mindspore/lite/tools/schema_gen/schema_type_def.h
+++ b/mindspore/lite/tools/schema_gen/schema_type_def.h
@@ -15,6 +15,7 @@
  */
 #ifndef MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_DEF_H_
 #define MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_DEF_H_
+
 #include
 #include "tools/schema_gen/schema_type_register.h"
diff --git a/mindspore/lite/tools/schema_gen/schema_type_register.h b/mindspore/lite/tools/schema_gen/schema_type_register.h
index 9970bda529..d9c69e95d7 100644
--- a/mindspore/lite/tools/schema_gen/schema_type_register.h
+++ b/mindspore/lite/tools/schema_gen/schema_type_register.h
@@ -15,12 +15,14 @@
  */
 #ifndef MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_REGISTER_H_
 #define MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_REGISTER_H_
+#include
+
 #include "src/ops/schema_register.h"

 namespace mindspore::lite::ops {
 class SchemaTypeRegister {
  public:
-  explicit SchemaTypeRegister(GetSchemaDef func) { SchemaRegisterImpl::Instance()->TypePush(func); }
+  explicit SchemaTypeRegister(GetSchemaDef func) { SchemaRegisterImpl::Instance()->TypePush(std::move(func)); }
   ~SchemaTypeRegister() = default;
 };
 }  // namespace mindspore::lite::ops
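
Note (not part of the patch itself): the tflite parser hunks above repeat one defensive pattern — assert the flatbuffer inputs, null-check `op`, null-check any buffer before dereferencing it, and guard divisors such as `num_splits` before the modulo. The following is a minimal standalone C++ sketch of that order of checks; `FakeBuffer`, `SplitAttr`, and `ParseSplit` are hypothetical stand-ins for the tflite/schema types and are not MindSpore APIs.

// Standalone sketch of the validation order used by the split parsers above.
// FakeBuffer and SplitAttr are invented stand-ins, not MindSpore types.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <memory>
#include <vector>

struct FakeBuffer {
  std::vector<uint8_t> data;  // raw bytes, as in a flatbuffer buffer table
};

struct SplitAttr {
  int32_t splitDim = 0;
  int32_t numberSplit = 0;
};

// Returns false instead of crashing when any precondition fails.
bool ParseSplit(const std::unique_ptr<FakeBuffer> &axis_buf, const std::vector<int32_t> &tensor_shape,
                int32_t num_splits, SplitAttr *attr) {
  if (attr == nullptr || axis_buf == nullptr || axis_buf->data.size() < sizeof(int32_t)) {
    std::cerr << "null or undersized input\n";  // mirrors the axis_buf null check added for SplitV
    return false;
  }
  auto axis = *(reinterpret_cast<const int32_t *>(axis_buf->data.data()));
  if (axis < 0) {
    axis += static_cast<int32_t>(tensor_shape.size());  // wrap negative axes, as the parsers do
  }
  if (axis < 0 || axis >= static_cast<int32_t>(tensor_shape.size())) {
    std::cerr << "axis out of range\n";
    return false;
  }
  if (num_splits == 0) {  // divide-by-zero guard, mirroring the check added to the Split parser
    std::cerr << "num_splits is zero\n";
    return false;
  }
  if (tensor_shape[axis] % num_splits != 0) {
    std::cerr << "num_splits does not divide dimension " << axis << "\n";
    return false;
  }
  attr->splitDim = axis;
  attr->numberSplit = num_splits;
  return true;
}

int main() {
  auto buf = std::make_unique<FakeBuffer>();
  int32_t axis = 1;
  buf->data.resize(sizeof(axis));
  std::memcpy(buf->data.data(), &axis, sizeof(axis));
  SplitAttr attr;
  std::cout << ParseSplit(buf, {4, 6, 8}, 3, &attr) << "\n";  // prints 1: dim 6 splits evenly by 3
  return 0;
}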