@@ -85,10 +85,10 @@ Status CaffeCustomParserAdapter::ParseWeights(const Message *op_src, ge::NodePtr
return SUCCESS;
}
bool bias_en = false;
bool update_in_turn = (static_cast<int64_t >(op->GetAllInputsSize()) == (layer->bottom_size() + layer->blobs_size()));
int start_pos = layer->bottom_size();
for (int i = 0; i < layer->blobs_size(); ++i) {
bool bias_en = false;
ge::GeTensorPtr weight = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(weight);
GE_CHK_STATUS_RET(ConvertWeight(layer->blobs(i), layer->name(), weight),
@@ -857,8 +857,7 @@ Status CaffeModelParser::AddNode(const domi::caffe::LayerParameter &layer, ge::C
// AddConstInput is a function defined in caffe_op_parser, override in caffe_reshape_parser.
std::shared_ptr<CaffeOpParser> caffe_op_parser = std::static_pointer_cast<CaffeOpParser>(op_parser);
GE_CHECK_NOTNULL(caffe_op_parser);
Status status;
status = caffe_op_parser->AddConstInput(node);
Status status = caffe_op_parser->AddConstInput(node);
if (status != SUCCESS) {
REPORT_CALL_ERROR("E19999", "AddConstInput failed for node:%s", node->GetOpDesc()->GetName().c_str());
GELOGE(FAILED, "[Add][ConstInput] to node %s fail.", node->GetOpDesc()->GetName().c_str());
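
// For context, a minimal self-contained sketch (hypothetical types, not the
// parser's classes) of the std::static_pointer_cast pattern used above: a
// shared_ptr to a base parser interface is downcast so a subclass hook can be
// called, and the returned status is checked at the call site.
// #include <iostream>
// #include <memory>
//
// enum Status { SUCCESS = 0, FAILED = 1 };
// struct OpParserBase { virtual ~OpParserBase() = default; };
// struct ConstInputParser : OpParserBase {
//   Status AddConstInput() const { return SUCCESS; }  // stand-in for the real hook
// };
//
// int main() {
//   std::shared_ptr<OpParserBase> op_parser = std::make_shared<ConstInputParser>();
//   // Safe only because the dynamic type is known to be ConstInputParser here.
//   std::shared_ptr<ConstInputParser> parser = std::static_pointer_cast<ConstInputParser>(op_parser);
//   Status status = parser->AddConstInput();
//   std::cout << (status == SUCCESS ? "AddConstInput ok" : "AddConstInput failed") << std::endl;
//   return status == SUCCESS ? 0 : 1;
// }
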
@@ -938,7 +937,7 @@ Status CaffeModelParser::AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const
for (int i = 0; i < valid_input_size; i++) {
ge::GeTensorDesc input_tensor;
std::string input_name;
ge::graphStatus ret = ge::GRAPH_SUCCESS;
ge::graphStatus ret;
// Below cases are supported for now when there are optional inputs
// x means optional, o means required input
// a. ooxxx, number of o and x>=layer.bottom_size+layer.blobs_size>=number of o
@@ -230,8 +230,6 @@ inline domi::Status CheckInt64Uint32MulOverflow(int64_t a, uint32_t b) {
} // namespace parser
} // namespace ge
/*lint --emacro((773),GE_TIMESTAMP_START)*/
/*lint -esym(773,GE_TIMESTAMP_START)*/
#define PARSER_TIMESTAMP_START(stage) uint64_t startUsec_##stage = ge::parser::GetCurrentTimestamp()
#define PARSER_TIMESTAMP_END(stage, stage_name) \
@@ -21,7 +21,6 @@
#include <string>
#include <vector>
/*lint -e1073*/
namespace ge {
// the operator type mapping table of caffe and mindspore
extern std::map<std::string, std::string> caffe_op_map;
@@ -41,5 +40,4 @@ extern std::vector<std::string> is_dataset_op_vec;
// output tensor num
extern std::map<std::string, int32_t> op_output_tensor_num;
} // namespace ge
/*lint +e1073*/
#endif // GE_COMMON_OP_MAP_H_
@@ -101,7 +101,7 @@ class OpParserFactory {
* @ingroup domi_omg
* @brief Each Op corresponds to a Creator function
*/
std::map<std::string, CREATOR_FUN> op_parser_creator_map_; // lint !e1073
std::map<std::string, CREATOR_FUN> op_parser_creator_map_;
std::map<std::string, CREATOR_FUN> fusion_op_parser_creator_map_;
friend class OpParserRegisterar;
@@ -675,8 +675,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
uint64_t m_tmp;
if (e_a > e_b) {
m_tmp = m_a;
uint16_t tmp;
tmp = e_a - e_b;
uint16_t tmp = e_a - e_b;
for (int i = 0; i < tmp; i++) {
m_tmp = m_tmp << 1;
}
@@ -690,8 +689,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
m_b = m_tmp;
}
m_div = static_cast<float>(m_a * 1.0f / m_b);
fp16_t fp_div;
fp_div = m_div;
fp16_t fp_div = m_div;
ret = fp_div.val;
if (s_a != s_b) {
ret |= kFp16SignMask;
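
// A small self-contained sketch (not the library's fp16_t implementation) of
// how an IEEE-754 half-precision bit pattern splits into the sign, exponent,
// and mantissa fields that Fp16Div works with: 1 sign bit, 5 exponent bits,
// 10 mantissa bits. The mask names below are illustrative; only the sign mask
// has a counterpart (kFp16SignMask) in the code above.
// #include <cstdint>
// #include <iostream>
//
// constexpr uint16_t kDemoSignMask = 0x8000u;
// constexpr uint16_t kDemoExpMask = 0x7C00u;
// constexpr uint16_t kDemoManMask = 0x03FFu;
//
// int main() {
//   uint16_t v = 0xC500u;  // bit pattern of -5.0 in fp16
//   unsigned sign = (v & kDemoSignMask) >> 15;
//   unsigned exponent = (v & kDemoExpMask) >> 10;  // biased by 15
//   unsigned mantissa = v & kDemoManMask;
//   std::cout << "sign=" << sign << " exponent=" << exponent
//             << " mantissa=0x" << std::hex << mantissa << std::endl;  // sign=1 exponent=17 mantissa=0x100
//   return 0;
// }
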
@@ -77,7 +77,7 @@ protected:
void RegisterCreator(const std::string &type, CREATOR_FUN fun);
private:
std::map<std::string, CREATOR_FUN> subgraph_adapter_creator_map_; // lint !e1073
std::map<std::string, CREATOR_FUN> subgraph_adapter_creator_map_;
friend class SubgraphAdapterRegisterar;
};
@@ -858,7 +858,6 @@ Status CreateNodeDefBytes(ge::NodePtr n, string originalType, map<string, PIOLis
// Set size
for (auto ge_desc : opDesc->GetAllOutputsDescPtr()) {
int64_t real_size = 1;
int64_t tmp_dim = 0;
auto data_type = ge_desc->GetDataType();
uint32_t size_type = 1;
@@ -873,7 +872,7 @@ Status CreateNodeDefBytes(ge::NodePtr n, string originalType, map<string, PIOLis
// calculate size
for (uint32_t j = 0; j < ge_desc->GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc->GetShape().GetDim(j);
int64_t tmp_dim = ge_desc->GetShape().GetDim(j);
GE_CHECK_GE(tmp_dim, 0);
PARSER_INT64_MULCHECK(real_size, tmp_dim);
real_size *= tmp_dim;
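
// A standalone sketch (hypothetical helper, not the parser's PARSER_INT64_MULCHECK
// macro) of the size calculation above: walk the shape, reject negative dims, and
// guard every multiplication against int64_t overflow before accumulating the
// element count.
// #include <cstdint>
// #include <iostream>
// #include <limits>
// #include <vector>
//
// static bool MulWouldOverflow(int64_t a, int64_t b) {
//   return b != 0 && a > std::numeric_limits<int64_t>::max() / b;
// }
//
// int main() {
//   std::vector<int64_t> dims = {8, 3, 224, 224};  // example NCHW shape
//   int64_t real_size = 1;
//   for (int64_t tmp_dim : dims) {
//     if (tmp_dim < 0) return 1;                   // mirrors GE_CHECK_GE(tmp_dim, 0)
//     if (MulWouldOverflow(real_size, tmp_dim)) return 1;
//     real_size *= tmp_dim;
//   }
//   std::cout << "element count: " << real_size << std::endl;  // 1204224
//   return 0;
// }
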
@@ -1279,8 +1278,7 @@ Status CreateFuncDefBytes(ge::NodePtr n, string original_type, string func_bin_p
GELOGI("len =%d\n", len);
ge::GeAttrValue::BYTES funcDefBytes;
funcDefBytes = ge::Buffer::CopyFrom((std::uint8_t *)buf, len);
ge::GeAttrValue::BYTES funcDefBytes = ge::Buffer::CopyFrom((std::uint8_t *)buf, len);
(void)ge::AttrUtils::SetBytes(opDesc, ge::ATTR_NAME_FRAMEWORK_FUNC_DEF, funcDefBytes);
GELOGI("funcDefBytes.GetSize() =%zu", funcDefBytes.GetSize());
@@ -1049,11 +1049,9 @@ Status TensorFlowModelParser::AdaptOpType(const domi::tensorflow::NodeDef *node_
op_type = tensorflow_train_op_map.at(node_op);
GE_CHK_STATUS_RET(CheckOpType(node_def, op_type), "Failed to check op type");
} else {
op_type = ge::parser::FRAMEWORKOP;
domi::tensorflow::AttrValue attr_call_inference;
if ((node_name == node_op) &&
ge::TensorFlowUtil::FindAttrValue(node_def, "_disable_call_shape_inference", attr_call_inference)) {
op_type = node_op;
}
}
@@ -1688,7 +1686,6 @@ Status TensorFlowModelParser::CheckInputNodeName(const string &input_node_name,
}
}
int32_t tmp_index = 0;
auto find = tmp_input_node_name.find(":");
if (find == string::npos) {
*node_name = tmp_input_node_name;
@@ -1696,6 +1693,7 @@ Status TensorFlowModelParser::CheckInputNodeName(const string &input_node_name,
if (index == nullptr) {
return SUCCESS;
}
int32_t tmp_index = 0;
*index = tmp_index;
return SUCCESS;
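
// A simplified stand-alone sketch (not the parser itself) of what CheckInputNodeName
// does with a TensorFlow-style input name: the text before ':' is the node name, and
// the text after it, when present, is the output index (0 otherwise).
// #include <cstdint>
// #include <iostream>
// #include <string>
//
// int main() {
//   std::string input_node_name = "conv1/BiasAdd:1";  // illustrative input
//   std::string node_name;
//   int32_t index = 0;
//   auto pos = input_node_name.find(':');
//   if (pos == std::string::npos) {
//     node_name = input_node_name;                    // no ':' suffix, index stays 0
//   } else {
//     node_name = input_node_name.substr(0, pos);
//     index = static_cast<int32_t>(std::stoi(input_node_name.substr(pos + 1)));
//   }
//   std::cout << node_name << " -> output index " << index << std::endl;  // conv1/BiasAdd -> output index 1
//   return 0;
// }
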
@@ -2060,15 +2058,14 @@ Status TensorFlowModelParser::EraseNormalOpOutputIfChild(shared_ptr<ge::ScopeGra
for (auto iter = normal_op_node_context.output_map.begin(); iter != normal_op_node_context.output_map.end();) {
string output_node_name = iter->first;
ge::ScopeFusionOpInfo to_info;
int32_t from_index = 0;
int32_t to_index = 0;
if (IsFusionOpChild(output_node_name, &to_info) &&
nodedef_map_[output_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) {
// Fuse operator, update index
std::vector<std::pair<int32_t, int32_t>> &pairs = iter->second;
for (auto &pair : pairs) {
from_index = pair.first;
int32_t from_index = pair.first;
int32_t to_index = 0;
GE_RETURN_WITH_LOG_IF_ERROR(GetInPutIndex(scope_graph, to_info, pair.second, to_index),
"GetInPutIndex failed ,output_node_name %s.", output_node_name.c_str());
tmp_output_map[to_info.fusion_node_name].push_back({from_index, to_index});
@@ -2097,15 +2094,14 @@ Status TensorFlowModelParser::UpdateNormalOpContext(shared_ptr<ge::ScopeGraph> &
for (auto iter = normal_op_node_context.input_map.begin(); iter != normal_op_node_context.input_map.end();) {
string input_node_name = iter->first;
ge::ScopeFusionOpInfo from_info;
int32_t from_index = 0;
int32_t to_index = 0;
if (IsFusionOpChild(input_node_name, &from_info) &&
nodedef_map_[input_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) {
// Fuse operator, update index
std::vector<std::pair<int32_t, int32_t>> &pairs = iter->second;
for (auto &pair : pairs) {
to_index = pair.second;
int32_t from_index = 0;
int32_t to_index = pair.second;
GE_RETURN_WITH_LOG_IF_ERROR(GetOutPutIndex(scope_graph, from_info, pair.first, from_index),
"GetOutPutIndex failed ,input_node_name %s.", input_node_name.c_str());
tmp_input_map[from_info.fusion_node_name].push_back({from_index, to_index});
@@ -3180,8 +3176,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef
output_graph_def->Clear();
for (const NodeDef &node : filtered_graph_def.node()) {
if (input_nodes.count(node.name())) {
NodeDef placeholder_node;
placeholder_node = node;
NodeDef placeholder_node = node;
placeholder_node.clear_input();
GE_IF_BOOL_EXEC(node.op() != "Placeholder", placeholder_node.set_op("Placeholder"));
domi::tensorflow::AttrValue attr_value;
@@ -3778,8 +3773,8 @@ void TensorFlowModelParser::UpdateInnerInputMap(const string &fusion_op_name, Op
std::map<std::string, std::vector<std::pair<int32_t, int32_t>>> tmp_input_map;
for (auto iter = op_node_context.input_map.begin(); iter != op_node_context.input_map.end();) {
string src_name = iter->first;
std::vector<std::pair<int32_t, int32_t>> &input_idx = iter->second;
if (src_name == ge::kInputFromFusionScope) {
std::vector<std::pair<int32_t, int32_t>> &input_idx = iter->second;
for (const auto &in_pair : input_idx) {
if (in_pair.second != kControlSlot) {
auto data = remap_data_input[fusion_op_name + std::to_string(in_pair.first)];
@@ -3825,8 +3820,8 @@ void TensorFlowModelParser::UpdateInnerOutputMap(const string &fusion_op_name, O
std::map<std::string, std::vector<std::pair<int32_t, int32_t>>> tmp_output_map;
for (auto iter = op_node_context.output_map.begin(); iter != op_node_context.output_map.end();) {
string dst_name = iter->first;
std::vector<std::pair<int32_t, int32_t>> &output_idx = iter->second;
if (dst_name == ge::kOutputToFusionScope) {
std::vector<std::pair<int32_t, int32_t>> &output_idx = iter->second;
for (const auto &out_pair : output_idx) {
if (out_pair.second != kControlSlot) {
auto data_outputs = remap_data_output[fusion_op_name + std::to_string(out_pair.second)];
@@ -34,7 +34,6 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
"parse ge_desc failed.");
uint32_t size_type = 1;
int64_t real_size = 1;
int64_t tmp_dim = 0;
auto data_type = ge_desc.GetDataType();
bool type_ret = ge::TypeUtils::GetDataTypeLength(data_type, size_type);
@@ -46,7 +45,7 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
return PARAM_INVALID);
// calculate size
for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc.GetShape().GetDim(j);
int64_t tmp_dim = ge_desc.GetShape().GetDim(j);
GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
real_size *= tmp_dim;
}
@@ -38,7 +38,6 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
"parse ge_desc failed.");
uint32_t size_type;
int64_t real_size = 1;
int64_t tmp_dim = 0;
auto data_type = ge_desc.GetDataType();
bool type_ret = ge::TypeUtils::GetDataTypeLength(data_type, size_type);
@@ -50,7 +49,7 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
return domi::PARAM_INVALID);
// calculate size
for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc.GetShape().GetDim(j);
int64_t tmp_dim = ge_desc.GetShape().GetDim(j);
GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
PARSER_INT64_MULCHECK(real_size, tmp_dim);
real_size *= tmp_dim;
@@ -179,14 +179,13 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY domi::Status TensorFlowUtil::Tr
"parse ge_desc failed.");
uint32_t size_type = 1;
int64_t tmp_dim = 0;
auto data_type = ge_desc.GetDataType();
GE_CHK_BOOL_RET_STATUS(ge::TypeUtils::GetDataTypeLength(data_type, size_type), PARAM_INVALID,
"dataType no define size , parse ge_desc failed.");
// get size
for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc.GetShape().GetDim(j);
int64_t tmp_dim = ge_desc.GetShape().GetDim(j);
// The shape inferred by fusedbatchnormgrad and mean calling tensorflow is not accurate.
// Here, special treatment is given to the two operators.