diff --git a/parser/caffe/caffe_parser.cc b/parser/caffe/caffe_parser.cc
index 7f8ac0c..6ce8af7 100644
--- a/parser/caffe/caffe_parser.cc
+++ b/parser/caffe/caffe_parser.cc
@@ -74,6 +74,7 @@ using std::ifstream;
 
 namespace {
 const size_t kMaxErrStrLen = 128U;
+std::map<std::string, std::vector<std::string>> params_share_map;
 } // namespace
 
 namespace ge {
@@ -282,7 +283,7 @@ Status CheckPathValid(const char *model_path, const string &custom_proto, string
 const set<string> CaffeWeightsParser::skiped_layer_type_ = {"Split", "SoftmaxWithLoss", "Accuracy", "Data",
                                                             "Dropout", "MultiLabelLMDB", "Python", "AnnotatedData"};
 
-Status CaffeModelParser::ParseInput(domi::caffe::NetParameter &proto_message, bool &input_data_flag) {
+Status CaffeModelParser::ParseInput(domi::caffe::NetParameter &proto_message, bool &input_data_flag) const {
   if (proto_message.input_size() > 0) {
     GELOGI("This net exsit input.");
@@ -456,7 +457,7 @@ Status CaffeModelParser::CustomProtoParse(const char *model_path, const string &
   return ret;
 }
 
-Status CaffeModelParser::ReadModelWithoutWarning(const char *model_path, google::protobuf::Message *message) {
+Status CaffeModelParser::ReadModelWithoutWarning(const char *model_path, google::protobuf::Message *message) const {
   int32_t copy_fd = mmDup(STDERR_FILENO);
   if (copy_fd < 0) {
     char_t err_buf[kMaxErrStrLen + 1U] = {};
@@ -536,7 +537,7 @@ Status CaffeModelParser::ReadCaffeModelFromText(const char *model_path, google::
 
 Status CaffeModelParser::ParseLayerParameter(const google::protobuf::Descriptor *layer_descriptor,
                                              const google::protobuf::Message *message,
-                                             vector<ge::Operator> &operators) {
+                                             vector<ge::Operator> &operators) const {
   auto field_name = layer_descriptor->FindFieldByName(kFieldName);
   CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(field_name, "Does not find name in google::protobuf::Descriptor");
   auto field_type = layer_descriptor->FindFieldByName(kFieldType);
@@ -624,7 +625,7 @@ void CaffeModelParser::AddOutputInfoToContext(string layer_name, int32_t top_ind
   ge::GetParserContext().user_out_nodes.push_back(std::make_pair(layer_name, top_index));
 }
 
-Status CaffeModelParser::ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message) {
+Status CaffeModelParser::ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message) const {
   if (ge::GetParserContext().user_out_tensors.empty()) {
     return SUCCESS;
   }
@@ -932,7 +933,7 @@ Status CaffeModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const dom
 }
 
 Status CaffeModelParser::AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer,
-                                                   const string &op_type) {
+                                                   const string &op_type) const {
   if (std::find(kAddTensorIrSkipNodes.begin(), kAddTensorIrSkipNodes.end(), op_type) != kAddTensorIrSkipNodes.end()) {
     op_desc = ge::parser::MakeShared<ge::OpDesc>(layer.name(), op_type);
     GE_CHECK_NOTNULL(op_desc);
@@ -1202,7 +1203,7 @@ std::string CaffeModelParser::RemapTopNameByLayer(const domi::caffe::LayerParame
   return (top_name + "_" + layer.name() + "_" + std::to_string(index));
 }
 
-Status CaffeModelParser::PreCheck(const domi::caffe::NetParameter &net) {
+Status CaffeModelParser::PreCheck(const domi::caffe::NetParameter &net) const {
   // Add layer in the model to PreChecker and check the general parameters
   PreChecker::Instance().SetModelName(net.name());
   for (int i = 0; i < net.layer_size(); i++) {
@@ -1977,7 +1978,7 @@ Status CaffeWeightsParser::ParseLayerField(const google::protobuf::Reflection *r
 }
 
 Status CaffeWeightsParser::ConvertBlobsProto(const google::protobuf::Message *message,
-                                             google::protobuf::Message *blobs) {
+                                             google::protobuf::Message *blobs) const {
   const google::protobuf::Reflection *blobs_reflection = message->GetReflection();
   CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(blobs_reflection, "Get Reflection failed in google::protobuf::Message");
   vector field_desc;
diff --git a/parser/caffe/caffe_parser.h b/parser/caffe/caffe_parser.h
index 8b08b9a..3b4491d 100644
--- a/parser/caffe/caffe_parser.h
+++ b/parser/caffe/caffe_parser.h
@@ -52,12 +52,11 @@ using std::string;
 using std::unordered_map;
 using std::vector;
 using domi::Status;
-static std::map<std::string, std::vector<std::string>> params_share_map;
 
 class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
  public:
   CaffeModelParser() {}
-  virtual ~CaffeModelParser() override {}
+  ~CaffeModelParser() override {}
 
   /**
    * @ingroup domi_omg
@@ -145,7 +144,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
    * @return SUCCESS build successfully
    * @return FAILED build failed
    */
-  Status PreCheck(const domi::caffe::NetParameter &net);
+  Status PreCheck(const domi::caffe::NetParameter &net) const;
 
   /**
    * @ingroup domi_omg
@@ -156,7 +155,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
   * @return SUCCESS build successfully
   * @return FAILED build failed
   */
-  Status ParseInput(domi::caffe::NetParameter &proto_message, bool &input_data_flag);
+  Status ParseInput(domi::caffe::NetParameter &proto_message, bool &input_data_flag) const;
 
   /*
   * @ingroup domi_omg
@@ -192,7 +191,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
   * @return SUCCESS read file successfully
   * @return FAILED read file failed
   */
-  Status ReadModelWithoutWarning(const char *model_path, google::protobuf::Message *message);
+  Status ReadModelWithoutWarning(const char *model_path, google::protobuf::Message *message) const;
 
   /*
   * @ingroup domi_omg
@@ -214,7 +213,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
   * @return FAILED parse layer failed
   */
   Status ParseLayerParameter(const google::protobuf::Descriptor *layer_descriptor,
-                             const google::protobuf::Message *message, std::vector<ge::Operator> &operators);
+                             const google::protobuf::Message *message, std::vector<ge::Operator> &operators) const;
 
   /*
   * @ingroup domi_omg
@@ -301,7 +300,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
   Status AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer) const;
 
   Status AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer,
-                                   const string &op_type);
+                                   const string &op_type) const;
 
   Status AddUserOutNodesTop();
 
@@ -321,7 +320,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
 
   void AddOutputInfoToContext(string layer_name, int32_t top_index) const;
 
-  Status ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message);
+  Status ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message) const;
 
   Status SaveDataLayerTops(const domi::caffe::LayerParameter &layer);
 
@@ -405,7 +404,7 @@ class PARSER_FUNC_VISIBILITY CaffeWeightsParser : public domi::WeightsParser {
                           google::protobuf::Message *layer);
 
   Status ConvertBlobsProto(const google::protobuf::Message *message,
-                           google::protobuf::Message *blobs);
+                           google::protobuf::Message *blobs) const;
 
   Status ConvertBlobShapeProto(const google::protobuf::Message *message,
                                google::protobuf::Message *dest_message) const;
diff --git a/parser/common/acl_graph_parser_util.cc b/parser/common/acl_graph_parser_util.cc
index 2721c01..97abc4a 100644
--- a/parser/common/acl_graph_parser_util.cc
+++ b/parser/common/acl_graph_parser_util.cc
@@ -266,7 +266,7 @@ void AclGrphParseUtil::SetDefaultFormat() {
   }
 }
 
-domi::Status AclGrphParseUtil::ParseAclOutputNodes(const string &out_nodes) {
+domi::Status AclGrphParseUtil::ParseAclOutputNodes(const string &out_nodes) const {
   try {
     ge::GetParserContext().out_nodes_map.clear();
     ge::GetParserContext().user_out_nodes.clear();
@@ -492,7 +492,7 @@ domi::Status AclGrphParseUtil::GetOutputLeaf(NodePtr node,
 }
 
 domi::Status AclGrphParseUtil::GetDefaultOutInfo(ge::ComputeGraphPtr &compute_graph,
-                                                 std::vector> &output_nodes_info) {
+                                                 std::vector> &output_nodes_info) const {
   std::vector> default_out_nodes = ge::GetParserContext().default_out_nodes;
   if (!default_out_nodes.empty()) {
     for (size_t i = 0; i < default_out_nodes.size(); ++i) {
@@ -587,7 +587,7 @@ domi::Status AclGrphParseUtil::CheckOptions(const std::map
     ::const_iterator it = ge::ir_option::ir_parser_suppported_options.find(key_str);
     if (it == ge::ir_option::ir_parser_suppported_options.end()) {
       ErrorManager::GetInstance().ATCReportErrMessage("E10016", {"parameter", "opname"}, {"parser_params", key_str});
       GELOGE(PARAM_INVALID, "[Check][Param] Input options include unsupported option(%s).Please check!", key_ascend);
@@ -651,7 +651,7 @@ domi::Status AclGrphParseUtil::ParseParamsBeforeGraph(const std::map
 
 domi::Status AclGrphParseUtil::ParseParamsAfterGraph(ge::Graph &graph,
-                                                     const std::map &parser_params) {
+                                                     const std::map &parser_params) const {
   // support paragrams: input_fp16_nodes, is_input_adjust_hw_layout,
   ComputeGraphPtr compute_graph = GraphUtils::GetComputeGraph(graph);
   GE_CHECK_NOTNULL(compute_graph);
@@ -943,7 +943,7 @@ FMK_FUNC_HOST_VISIBILITY bool ValidateStr(const std::string &filePath, const std
   regex_t reg;
   int cflags = REG_EXTENDED | REG_NOSUB;
   int ret = regcomp(&reg, mode.c_str(), cflags);
-  if (ret) {
+  if (ret != 0) {
     regerror(ret, &reg, ebuff, kMaxBuffSize);
     GELOGW("regcomp failed, reason: %s", ebuff);
     regfree(&reg);
@@ -951,7 +951,7 @@ FMK_FUNC_HOST_VISIBILITY bool ValidateStr(const std::string &filePath, const std
   }
 
   ret = regexec(&reg, filePath.c_str(), 0, nullptr, 0);
-  if (ret) {
+  if (ret != 0) {
     regerror(ret, &reg, ebuff, kMaxBuffSize);
     GELOGE(ge::PARAM_INVALID, "[Invoke][RegExec] failed, reason: %s", ebuff);
     regfree(&reg);
diff --git a/parser/common/acl_graph_parser_util.h b/parser/common/acl_graph_parser_util.h
index 4c63c7e..8af1d27 100644
--- a/parser/common/acl_graph_parser_util.h
+++ b/parser/common/acl_graph_parser_util.h
@@ -44,7 +44,8 @@ class AclGrphParseUtil {
   domi::Status SetOutputNodeInfo(ge::Graph &graph, const std::map &parser_params);
   domi::Status ParseParamsBeforeGraph(const std::map &parser_params, std::string &graph_name);
-  domi::Status ParseParamsAfterGraph(ge::Graph &graph, const std::map &parser_params);
+  domi::Status ParseParamsAfterGraph(ge::Graph &graph,
+                                     const std::map &parser_params) const;
 
  private:
   bool parser_initialized = false;
@@ -53,7 +54,7 @@ class AclGrphParseUtil {
   void CreateOutputNodesInfo(std::vector> &output_nodes_info,
                              std::vector &output_nodes_name) const;
   static void SetDefaultFormat();
-  domi::Status ParseAclOutputNodes(const std::string &out_nodes);
+  domi::Status ParseAclOutputNodes(const std::string &out_nodes) const;
   domi::Status ParseAclOutputFp16NodesFormat(const std::string &is_output_fp16) const;
   domi::Status ParseAclEnableScope(const std::string &enable_scope_fusion_passes) const;
   static void AddAttrsForInputNodes(const vector &adjust_fp16_format_vec, const string &fp16_nodes_name,
@@ -61,7 +62,7 @@ class AclGrphParseUtil {
   domi::Status ParseAclInputFp16Nodes(const ComputeGraphPtr &graph, const string &input_fp16_nodes,
                                       const string &is_input_adjust_hw_layout) const;
   domi::Status GetDefaultOutInfo(ge::ComputeGraphPtr &compute_graph,
-                                 std::vector> &output_nodes_info);
+                                 std::vector> &output_nodes_info) const;
 };
 
 namespace parser {
diff --git a/parser/common/model_saver.cc b/parser/common/model_saver.cc
index 2963ad3..21d9368 100644
--- a/parser/common/model_saver.cc
+++ b/parser/common/model_saver.cc
@@ -77,7 +77,7 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFi
   const char *model_char = model_str.c_str();
   uint32_t len = static_cast<uint32_t>(model_str.length());
   // Write data to file
-  mmSsize_t mmpa_ret = mmWrite(fd, const_cast<void *>((const void *)model_char), len);
+  mmSsize_t mmpa_ret = mmWrite(fd, const_cast<void *>(static_cast<const void *>(model_char)), len);
   if (mmpa_ret == EN_ERROR || mmpa_ret == EN_INVALID_PARAM) {
     char_t err_buf[kMaxErrStrLen + 1U] = {};
     const auto err_msg = mmGetErrorFormatMessage(mmGetErrorCode(), &err_buf[0], kMaxErrStrLen);
diff --git a/parser/common/parser_fp16_t.cc b/parser/common/parser_fp16_t.cc
index eca3cae..cf31644 100644
--- a/parser/common/parser_fp16_t.cc
+++ b/parser/common/parser_fp16_t.cc
@@ -48,7 +48,7 @@ static bool IsRoundOne(uint64_t man, uint16_t trunc_len) {
   uint64_t mask0 = 0x4;
   uint64_t mask1 = 0x2;
   uint64_t mask2;
-  uint16_t shift_out = static_cast(trunc_len - kDim2);
+  uint16_t shift_out = static_cast(trunc_len - static_cast(kDim2));
   mask0 = mask0 << shift_out;
   mask1 = mask1 << shift_out;
   mask2 = mask1 - 1;
@@ -89,7 +89,7 @@ static float Fp16ToFloat(const uint16_t &fp_val) {
   int16_t hf_exp;
   ExtractFp16(fp_val, hf_sign, hf_exp, hf_man);
 
-  while (hf_man && !(hf_man & kFp16ManHideBit)) {
+  while ((hf_man != 0U) && ((hf_man & kFp16ManHideBit) == 0U)) {
     hf_man <<= 1;
     hf_exp--;
   }
@@ -120,7 +120,7 @@ static double Fp16ToDouble(const uint16_t &fp_val) {
   int16_t hf_exp;
   ExtractFp16(fp_val, hf_sign, hf_exp, hf_man);
 
-  while (hf_man && !(hf_man & kFp16ManHideBit)) {
+  while ((hf_man != 0U) && ((hf_man & kFp16ManHideBit) == 0U)) {
     hf_man <<= 1;
     hf_exp--;
   }
@@ -128,7 +128,7 @@ static double Fp16ToDouble(const uint16_t &fp_val) {
   uint64_t e_ret;
   uint64_t m_ret;
   uint64_t s_ret = hf_sign;
-  if (!hf_man) {
+  if (hf_man == 0U) {
     e_ret = 0;
     m_ret = 0;
   } else {
@@ -256,7 +256,7 @@ static uint8_t Fp16ToUInt8(const uint16_t &fp_val) {
       shift_out++;
     }
   }
-  if (!overflow_flag) {
+  if (overflow_flag == 0U) {
     bool need_round = IsRoundOne(long_int_m, shift_out + kFp16ManLen);
     m_ret = static_cast((long_int_m >> (kFp16ManLen + shift_out)) & kBitLen8Max);
     if (need_round && m_ret != kBitLen8Max) {
@@ -290,7 +290,7 @@ static uint16_t GetUint16ValByMan(uint16_t s_ret, const uint64_t &long_int_m, co
   if (m_ret == 0) {
     s_ret = 0;
   }
-  return static_cast((s_ret << kBitShift15) | (m_ret));
+  return static_cast((s_ret << static_cast(kBitShift15)) | (m_ret));
 }
 
 /// @ingroup fp16_t math conversion static method
@@ -431,7 +431,7 @@ static int32_t Fp16ToInt32(const uint16_t &fp_val) {
       s_ret = 0;
     }
     // Generate final result
-    ret_v = (s_ret << kBitShift31) | (m_ret);
+    ret_v = (s_ret << static_cast(kBitShift31)) | (m_ret);
   }
 
   return *(ge::PtrToPtr(&ret_v));
@@ -565,7 +565,7 @@ static uint16_t Fp16Add(uint16_t v_1, uint16_t v_2) {
     m_trunc = (m_b << (static_cast(kBitShift32) - static_cast(e_tmp)));
     m_b = RightShift(m_b, e_tmp);
   } else if (e_a < e_b) {
-    m_trunc = (m_a << (kBitShift32 - static_cast(e_tmp)));
+    m_trunc = (m_a << (static_cast(kBitShift32) - static_cast(e_tmp)));
     m_a = RightShift(m_a, e_tmp);
   }
   // calculate mantissav
@@ -603,7 +603,7 @@ static uint16_t Fp16Mul(uint16_t v_1, uint16_t v_2) {
   m_a = m_a_tmp;
   m_b = m_b_tmp;
 
-  e_ret = ((e_a + e_b) - kFp16ExpBias) - kDim10;
+  e_ret = ((e_a + e_b) - kFp16ExpBias) - static_cast(kDim10);
   mul_m = m_a * m_b;
 
   s_ret = s_a ^ s_b;
@@ -905,7 +905,7 @@ fp16_t &fp16_t::operator=(const float &f_val) {
 
 fp16_t &fp16_t::operator=(const int8_t &i_val) {
   uint16_t s_ret, e_ret, m_ret;
-  s_ret = static_cast(((static_cast(i_val)) & 0x80) >> kDim7);
+  s_ret = static_cast(((static_cast(i_val)) & 0x80) >> static_cast(kDim7));
   m_ret = static_cast(((static_cast(i_val)) & kInt8Max));
 
   if (m_ret == 0) {
@@ -952,14 +952,14 @@ static void SetValByUint16Val(const uint16_t &input_val, const uint16_t &sign, u
   uint16_t len = static_cast(GetManBitLength(m_tmp));
   if (static_cast(m_tmp)) {
     int16_t e_ret;
-    if (len > kDim11) {
+    if (len > static_cast(kDim11)) {
       e_ret = kFp16ExpBias + kFp16ManLen;
       uint16_t e_tmp = len - static_cast(kDim11);
       uint32_t trunc_mask = 1;
       for (int i = 1; i < e_tmp; i++) {
         trunc_mask = (trunc_mask << 1) + 1;
       }
-      uint32_t m_trunc = (m_tmp & trunc_mask) << (kBitShift32 - e_tmp);
+      uint32_t m_trunc = (m_tmp & trunc_mask) << (static_cast(kBitShift32) - e_tmp);
       for (int i = 0; i < e_tmp; i++) {
         m_tmp = (m_tmp >> 1);
         e_ret = e_ret + 1;
@@ -991,7 +991,7 @@ fp16_t &fp16_t::operator=(const int16_t &i_val) {
     val = 0;
   } else {
     uint16_t ui_val = *(ge::PtrToPtr(&i_val));
-    auto s_ret = static_cast(ui_val >> kBitShift15);
+    auto s_ret = static_cast(ui_val >> static_cast(kBitShift15));
     if (static_cast(s_ret)) {
       int16_t iValM = -i_val;
       ui_val = *(ge::PtrToPtr(&iValM));
@@ -1018,7 +1018,7 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
       for (int i = 1; i < e_tmp; i++) {
         trunc_mask = (trunc_mask << 1) + 1;
       }
-      m_trunc = (m_ret & trunc_mask) << (kBitShift32 - e_tmp);
+      m_trunc = (m_ret & trunc_mask) << (static_cast(kBitShift32) - e_tmp);
       for (int i = 0; i < e_tmp; i++) {
         m_ret = (m_ret >> 1);
         e_ret = e_ret + 1;
@@ -1040,7 +1040,7 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
     }
   } else {
     e_ret = static_cast(kFp16ExpBias);
-    m_ret = m_ret << (kDim11 - len);
+    m_ret = m_ret << (static_cast(kDim11) - len);
     e_ret = e_ret + (len - 1);
   }
   val = FP16_CONSTRUCTOR(0u, static_cast(e_ret), m_ret);
@@ -1062,7 +1062,7 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
     for (int i = 1; i < e_tmp; i++) {
       trunc_mask = (trunc_mask << 1) + 1;
     }
-    m_trunc = (m_tmp & trunc_mask) << (kBitShift32 - e_tmp);
+    m_trunc = (m_tmp & trunc_mask) << (static_cast(kBitShift32) - e_tmp);
     for (int i = 0; i < e_tmp; i++) {
       m_tmp = (m_tmp >> 1);
       e_ret = e_ret + 1;
@@ -1085,7 +1085,7 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
     }
   } else {
     e_ret = static_cast(kFp16ExpBias);
-    m_tmp = m_tmp << (kDim11 - len);
+    m_tmp = m_tmp << (static_cast(kDim11) - len);
     e_ret = e_ret + (len - 1);
   }
   auto m_ret = static_cast(m_tmp);
@@ -1097,7 +1097,7 @@ fp16_t &fp16_t::operator=(const int32_t &i_val) {
     val = 0;
   } else {
     uint32_t ui_val = *(ge::PtrToPtr(&i_val));
-    auto s_ret = static_cast(ui_val >> kBitShift31);
+    auto s_ret = static_cast(ui_val >> static_cast(kBitShift31));
     if (static_cast(s_ret)) {
       int32_t iValM = -i_val;
       ui_val = *(ge::PtrToPtr(&iValM));
@@ -1124,7 +1124,7 @@ fp16_t &fp16_t::operator=(const uint32_t &ui_val) {
       for (int i = 1; i < e_tmp; i++) {
         trunc_mask = (trunc_mask << 1) + 1;
       }
-      m_trunc = (m_tmp & trunc_mask) << static_cast(kBitShift32 - e_tmp);
+      m_trunc = (m_tmp & trunc_mask) << static_cast(static_cast(kBitShift32) - e_tmp);
       for (uint16_t i = 0; i < e_tmp; i++) {
         m_tmp = (m_tmp >> 1);
        e_ret = e_ret + 1;
@@ -1147,7 +1147,7 @@ fp16_t &fp16_t::operator=(const uint32_t &ui_val) {
     }
   } else {
     e_ret = static_cast(kFp16ExpBias);
-    m_tmp = m_tmp << (kDim11 - len);
+    m_tmp = m_tmp << (static_cast(kDim11) - len);
     e_ret = e_ret + (len - 1);
   }
   auto m_ret = static_cast(m_tmp);
diff --git a/parser/common/pass.h b/parser/common/pass.h
index 7f00962..7db45b6 100644
--- a/parser/common/pass.h
+++ b/parser/common/pass.h
@@ -19,8 +19,6 @@
 
 #include 
 
-#include "common/fmk_error_codes.h"
-
 namespace ge {
 ///
 /// @ingroup domi_omg
diff --git a/parser/common/pre_checker.cc b/parser/common/pre_checker.cc
index dea8651..4fc79dc 100644
--- a/parser/common/pre_checker.cc
+++ b/parser/common/pre_checker.cc
@@ -218,9 +218,9 @@ Status PreChecker::Save(const string &file) {
 
   // Constructing JSON information of operators in order of network
   for (auto id : ops_) {
-    auto iter = op_map_.find(id);
-    GE_CHK_BOOL_RET_STATUS(iter != op_map_.end(), FAILED, "[Check][Param] don't find this op.");
-    Info &info = iter->second;
+    std::map::const_iterator iter = op_map_.find(id);
+    GE_CHK_BOOL_RET_STATUS(iter != op_map_.cend(), FAILED, "[Check][Param] don't find this op.");
+    const Info &info = iter->second;
 
     // Initialization operator general information
     nlohmann::json op = {{kKeyOpName, info.name}, {kKeyOpType, info.type}};
diff --git a/parser/common/proto_file_parser.cc b/parser/common/proto_file_parser.cc
index af4a4d0..0bae5db 100644
--- a/parser/common/proto_file_parser.cc
+++ b/parser/common/proto_file_parser.cc
@@ -67,7 +67,7 @@ bool GetIdentifier(const std::string &line, int &identifier) {
       break;
     }
     if (line[i] >= kMinNum && line[i] <= kMaxNum) {
-      identifier = identifier * kDecimalMulti + line[i] - kMinNum;
+      identifier = identifier * kDecimalMulti + static_cast(line[i]) - static_cast(kMinNum);
     }
     if (identifier > kMaxIdentifier || identifier < 0) {
       return false;
diff --git a/parser/common/register_tbe.cc b/parser/common/register_tbe.cc
index d21a61e..421c9cb 100644
--- a/parser/common/register_tbe.cc
+++ b/parser/common/register_tbe.cc
@@ -75,7 +75,7 @@ bool OpRegistrationTbe::Finalize(const OpRegistrationData &reg_data, bool is_tra
   return ret;
 }
 
-bool OpRegistrationTbe::RegisterParser(const OpRegistrationData &reg_data) {
+bool OpRegistrationTbe::RegisterParser(const OpRegistrationData &reg_data) const {
   if (reg_data.GetFrameworkType() == domi::TENSORFLOW) {
     std::shared_ptr<OpParserFactory> factory = OpParserFactory::Instance(domi::TENSORFLOW);
     if (factory == nullptr) {
diff --git a/parser/common/register_tbe.h b/parser/common/register_tbe.h
index 7e2803c..5578054 100644
--- a/parser/common/register_tbe.h
+++ b/parser/common/register_tbe.h
@@ -27,7 +27,7 @@ class OpRegistrationTbe {
   bool Finalize(const OpRegistrationData &reg_data, bool is_train = false);
 
  private:
-  bool RegisterParser(const OpRegistrationData &reg_data);
+  bool RegisterParser(const OpRegistrationData &reg_data) const;
 };
 } // namespace ge
diff --git a/parser/onnx/subgraph_adapter/if_subgraph_adapter.cc b/parser/onnx/subgraph_adapter/if_subgraph_adapter.cc
index 23b859c..3b3eaf1 100644
--- a/parser/onnx/subgraph_adapter/if_subgraph_adapter.cc
+++ b/parser/onnx/subgraph_adapter/if_subgraph_adapter.cc
@@ -45,7 +45,7 @@ domi::Status IfSubgraphAdapter::AdaptAndFindAllSubgraphs(
 
 domi::Status IfSubgraphAdapter::ParseIfNodeSubgraphs(
     ge::onnx::NodeProto *parent_node, std::vector &onnx_graphs,
-    std::map &name_to_onnx_graph, const std::string &parent_graph_name) {
+    std::map &name_to_onnx_graph, const std::string &parent_graph_name) const {
   if (parent_node->attribute_size() != kIfNodeAttrSize) {
     GELOGE(FAILED, "[Parse][Node] Invalid graph, if node attribute size:%d must be 2.", parent_node->attribute_size());
     REPORT_INNER_ERROR("E19999", "Invalid graph, if node attribute size:%d must be 2.", parent_node->attribute_size());
diff --git a/parser/onnx/subgraph_adapter/if_subgraph_adapter.h b/parser/onnx/subgraph_adapter/if_subgraph_adapter.h
index ff2f6e6..eb6f492 100644
--- a/parser/onnx/subgraph_adapter/if_subgraph_adapter.h
+++ b/parser/onnx/subgraph_adapter/if_subgraph_adapter.h
@@ -32,7 +32,7 @@ class PARSER_FUNC_VISIBILITY IfSubgraphAdapter : public SubgraphAdapter {
  private:
   domi::Status ParseIfNodeSubgraphs(ge::onnx::NodeProto *parent_node, std::vector &onnx_graphs,
                                     std::map &name_to_onnx_graph,
-                                    const std::string &parent_graph_name);
+                                    const std::string &parent_graph_name) const;
   domi::Status GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx_graph, std::set &all_inputs) const;
   void AddInputNodeForGraph(const std::set &all_inputs, ge::onnx::GraphProto &onnx_graph) const;
   void AddInputForParentNode(const std::set &all_inputs, ge::onnx::NodeProto &parent_node) const;
diff --git a/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc b/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc
index dd27d55..fce58d8 100644
--- a/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc
+++ b/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc
@@ -59,7 +59,7 @@ Status TensorFlowFusionCustomParserAdapter::ParseParams(const vector
 
 Status TensorFlowFusionCustomParserAdapter::ParseParams(const std::vector &v_input_const,
-                                                        ge::NodePtr &node) {
+                                                        ge::NodePtr &node) const {
   GE_CHECK_NOTNULL(node);
   auto op_dest = node->GetOpDesc();
   GE_CHECK_NOTNULL(op_dest);
diff --git a/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h b/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h
index 676ff64..591b38c 100644
--- a/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h
+++ b/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h
@@ -42,7 +42,7 @@ class PARSER_FUNC_VISIBILITY TensorFlowFusionCustomParserAdapter : public Tensor
   * @return FAILED parse failed
   * @author
   */
-  Status ParseParams(const std::vector &v_input_const, ge::NodePtr &node);
+  Status ParseParams(const std::vector &v_input_const, ge::NodePtr &node) const;
 };
 } // namespace ge
diff --git a/parser/tensorflow/tensorflow_parser.cc b/parser/tensorflow/tensorflow_parser.cc
index a1a2d9a..189e176 100644
--- a/parser/tensorflow/tensorflow_parser.cc
+++ b/parser/tensorflow/tensorflow_parser.cc
@@ -3059,7 +3059,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef
     GE_CHECK_NOTNULL(current_node);
     for (const string &input_name : current_node->input()) {
       string input_node_name = NodeNameFromInput(input_name);
-      if (!delete_nodes.count(input_node_name)) {
+      if (delete_nodes.count(input_node_name) == 0U) {
        next_inputs.insert(input_node_name);
      }
    }
@@ -3072,7 +3072,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef
     if (static_cast(input_nodes.count(node.name()))) {
       *(filtered_graph_def.mutable_node()->Add()) = node;
     }
-    if (!delete_nodes.count(node.name())) {
+    if (delete_nodes.count(node.name()) == 0U) {
       *(filtered_graph_def.mutable_node()->Add()) = node;
     }
   }
@@ -3135,7 +3135,7 @@ Status TensorFlowModelParser::TrimGraphByOutput(const domi::tensorflow::GraphDef
     GE_CHECK_NOTNULL(current_node);
     for (const string &input_name : current_node->input()) {
       string input_node_name = NodeNameFromInput(input_name);
-      if (!required_nodes.count(input_node_name)) {
+      if (required_nodes.count(input_node_name) == 0U) {
        next_inputs.insert(input_node_name);
      }
    }