| Author | SHA1 | Message | Date |
|---|---|---|---|
| | 87f695e2cf | !386 Revert code (Merge pull request !386 from 赵鲁鹏/c79_open) | 4 years ago |
| | 7c00192dcb | backspace | 4 years ago |
| | 15a3bd5da1 | !381 clear sc (Merge pull request !381 from 刘康/lk-1.6) | 4 years ago |
| | 0e2afbfc26 | !377 c79 opensource warning cleanup (Merge pull request !377 from 赵鲁鹏/c79_open) | 4 years ago |
| | 5dc79a9b7d | fix opensource | 4 years ago |
| | 50edd7aa2f | fix opensource | 4 years ago |
| | 1e68fdfd3a | fix opensource | 4 years ago |
| | f58302c3aa | clear sc | 4 years ago |
| | c110be6445 | fix opensource | 4 years ago |
| | 927c50a94b | update OWNERS. | 4 years ago |
| | de84350c26 | update .gitmodules. | 4 years ago |
From `.gitmodules`:

```diff
@@ -1,4 +1,4 @@
 [submodule "metadef"]
 	path = metadef
 	url = https://gitee.com/ascend/metadef.git
-	branch = master
+	branch = r1.6.0
```
From `OWNERS`:

```diff
@@ -1,7 +1,7 @@
 approvers:
 - ji_chen
-- wqtshg
 - ljl0711
+- liyihan123
 reviewers:
 - xchu42
 - sheng-nan
```
```diff
@@ -85,10 +85,10 @@ Status CaffeCustomParserAdapter::ParseWeights(const Message *op_src, ge::NodePtr
     return SUCCESS;
   }

-  bool bias_en = false;
   bool update_in_turn = (static_cast<int64_t >(op->GetAllInputsSize()) == (layer->bottom_size() + layer->blobs_size()));
   int start_pos = layer->bottom_size();
   for (int i = 0; i < layer->blobs_size(); ++i) {
+    bool bias_en = false;
     ge::GeTensorPtr weight = ge::parser::MakeShared<ge::GeTensor>();
     GE_CHECK_NOTNULL(weight);
     GE_CHK_STATUS_RET(ConvertWeight(layer->blobs(i), layer->name(), weight),
```
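This hunk narrows the scope of `bias_en`: declared once outside the loop, the flag would keep whatever value a previous blob set; declared inside, it is re-initialized to `false` for every blob. A minimal self-contained sketch of the difference, with hypothetical values rather than the parser's real data:

```cpp
#include <iostream>
#include <vector>

int main() {
  const std::vector<int> blobs = {2, 0, 3};  // hypothetical blob sizes

  // Buggy pattern: a flag declared outside the loop carries state forward.
  bool stale_flag = false;
  for (int b : blobs) {
    if (b > 1) stale_flag = true;
    // After the first large blob, stale_flag stays true for every later blob.
    std::cout << "outer-scope flag: " << stale_flag << '\n';  // 1, 1, 1
  }

  // Fixed pattern (as in the diff above): re-initialize per iteration.
  for (int b : blobs) {
    bool fresh_flag = false;
    if (b > 1) fresh_flag = true;
    std::cout << "loop-local flag: " << fresh_flag << '\n';  // 1, 0, 1
  }
  return 0;
}
```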
```diff
@@ -857,8 +857,7 @@ Status CaffeModelParser::AddNode(const domi::caffe::LayerParameter &layer, ge::C
   // AddConstInput is a function defined in caffe_op_parser, override in caffe_reshape_parser.
   std::shared_ptr<CaffeOpParser> caffe_op_parser = std::static_pointer_cast<CaffeOpParser>(op_parser);
   GE_CHECK_NOTNULL(caffe_op_parser);
-  Status status;
-  status = caffe_op_parser->AddConstInput(node);
+  Status status = caffe_op_parser->AddConstInput(node);
   if (status != SUCCESS) {
     REPORT_CALL_ERROR("E19999", "AddConstInput failed for node:%s", node->GetOpDesc()->GetName().c_str());
     GELOGE(FAILED, "[Add][ConstInput] to node %s fail.", node->GetOpDesc()->GetName().c_str());
```
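This is the first of several hunks that collapse a declaration followed immediately by an assignment into a single initialized declaration (the same edit recurs below for `tmp`, `fp_div`, `funcDefBytes`, and `placeholder_node`). A small sketch of the pattern in isolation, using a stand-in function rather than the parser's API:

```cpp
#include <string>

std::string MakeGreeting() { return "hello"; }  // stand-in for the real call

void Before() {
  std::string s;       // default-constructed, then overwritten
  s = MakeGreeting();  // two steps: one wasted construction plus an assignment
  (void)s;
}

void After() {
  std::string s = MakeGreeting();  // one step: declared at first use, initialized
  (void)s;
}

int main() {
  Before();
  After();
  return 0;
}
```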
```diff
@@ -938,7 +937,7 @@ Status CaffeModelParser::AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const
   for (int i = 0; i < valid_input_size; i++) {
     ge::GeTensorDesc input_tensor;
     std::string input_name;
-    ge::graphStatus ret = ge::GRAPH_SUCCESS;
+    ge::graphStatus ret;
     // Below cases are supported for now when there are optional inputs
     // x means optional, o means required input
     // a. ooxxx, number of o and x >= layer.bottom_size + layer.blobs_size >= number of o
```
```diff
@@ -1097,8 +1096,8 @@ Status CaffeModelParser::AddUserOutNodesTop() {
 }

 Status CaffeModelParser::AddOutputTop(const domi::caffe::NetParameter &proto_message) {
-  for (int32_t i = 0; i < proto_message.layer_size(); i++) {
-    const domi::caffe::LayerParameter &layer = proto_message.layer(i);
+  for (int32_t j = 0; j < proto_message.layer_size(); j++) {
+    const domi::caffe::LayerParameter &layer = proto_message.layer(j);
     if (!CheckValidLayer(layer)) {
       continue;
```
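The loop index is renamed from `i` to `j` here (and in the two ParamSpec loops below), which reads like a fix for an index name already in use in an enclosing scope; the surrounding code is not visible in this hunk, so the motivation is inferred. A self-contained sketch of the shadowing hazard such a rename avoids:

```cpp
#include <cstdio>

int main() {
  for (int i = 0; i < 2; ++i) {
    // Reusing `i` here shadows the outer index; g++ flags it with -Wshadow,
    // and lint tools raise similar warnings.
    for (int i = 0; i < 3; ++i) {
      std::printf("inner i=%d (outer i is hidden)\n", i);
    }
    // Renaming the inner index keeps both counters visible and distinct.
    for (int j = 0; j < 3; ++j) {
      std::printf("outer i=%d, inner j=%d\n", i, j);
    }
  }
  return 0;
}
```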
```diff
@@ -1296,8 +1295,8 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
     // parse ParamSpec
     std::vector<string> v_param_names;
-    for (int i = 0; i < layer.param_size(); i++) {
-      const domi::caffe::ParamSpec &param = layer.param(i);
+    for (int j = 0; j < layer.param_size(); j++) {
+      const domi::caffe::ParamSpec &param = layer.param(j);
       GE_IF_BOOL_EXEC((param.has_name()), v_param_names.emplace_back(param.name()));
     }
```
```diff
@@ -1515,8 +1514,8 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
     // parse ParamSpec
     std::vector<string> v_param_names;
-    for (int i = 0; i < layer.param_size(); i++) {
-      const domi::caffe::ParamSpec &param = layer.param(i);
+    for (int j = 0; j < layer.param_size(); j++) {
+      const domi::caffe::ParamSpec &param = layer.param(j);
       GE_IF_BOOL_EXEC((param.has_name()), v_param_names.emplace_back(param.name()));
     }
```
```diff
@@ -2095,17 +2094,17 @@ Status CaffeWeightsParser::ConvertLayerParameter(const google::protobuf::Message
                                                  ge::ComputeGraphPtr &graph) {
   vector<string> need_share_layers;
   const domi::caffe::LayerParameter *layer = reinterpret_cast<const domi::caffe::LayerParameter *>(layer_message);
-  const string &layer_name = layer->name();
+  const string &share_layer_name = layer->name();
   const string &layer_type = layer->type();
   for (auto p_iter = params_share_map.begin(); p_iter != params_share_map.end(); ++p_iter) {
-    if (find(p_iter->second.begin(), p_iter->second.end(), layer_name) != p_iter->second.end()) {
-      GELOGI("layer:%s need share weights !", layer_name.c_str());
+    if (find(p_iter->second.begin(), p_iter->second.end(), share_layer_name) != p_iter->second.end()) {
+      GELOGI("layer:%s need share weights !", share_layer_name.c_str());
       need_share_layers = p_iter->second;
     }
   }

   if (need_share_layers.size() == 0) {
-    need_share_layers.push_back(layer_name);
+    need_share_layers.push_back(share_layer_name);
   }

   for (auto share_iter = need_share_layers.begin(); share_iter != need_share_layers.end(); ++share_iter) {
```
```diff
@@ -2211,27 +2210,27 @@ Status CaffeWeightsParser::ConvertNetParameter(const NetParameter &param, ge::Co
   for (int i = 0; i < num_layer; ++i) {
     const LayerParameter &layer = param.layer(i);
-    const string &layer_name = layer.name();
+    const string &share_layer_name = layer.name();

     // Skip some layer types
     if (skiped_layer_type_.find(layer.type()) != skiped_layer_type_.end()) {
-      GELOGI("Skip layer %s", layer_name.c_str());
+      GELOGI("Skip layer %s", share_layer_name.c_str());
       continue;
     }

-    GELOGI("Parse layer %s", layer_name.c_str());
+    GELOGI("Parse layer %s", share_layer_name.c_str());

     vector<string> need_share_layers;
     for (auto p_iter = params_share_map.begin(); p_iter != params_share_map.end(); ++p_iter) {
-      if (find(p_iter->second.begin(), p_iter->second.end(), layer_name) != p_iter->second.end()) {
-        GELOGI("Layer: %s need share weights !", layer_name.c_str());
+      if (find(p_iter->second.begin(), p_iter->second.end(), share_layer_name) != p_iter->second.end()) {
+        GELOGI("Layer: %s need share weights !", share_layer_name.c_str());
         need_share_layers = p_iter->second;
       }
     }

     if (need_share_layers.size() == 0) {
-      need_share_layers.push_back(layer_name);
+      need_share_layers.push_back(share_layer_name);
     }

     for (auto share_iter = need_share_layers.begin(); share_iter != need_share_layers.end(); ++share_iter) {
```
```diff
@@ -230,8 +230,6 @@ inline domi::Status CheckInt64Uint32MulOverflow(int64_t a, uint32_t b) {
 }  // namespace parser
 }  // namespace ge

-/*lint --emacro((773),GE_TIMESTAMP_START)*/
-/*lint -esym(773,GE_TIMESTAMP_START)*/
 #define PARSER_TIMESTAMP_START(stage) uint64_t startUsec_##stage = ge::parser::GetCurrentTimestamp()

 #define PARSER_TIMESTAMP_END(stage, stage_name) \
```
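The lint hunks here and below drop PC-lint suppression comments (messages 773 and 1073) that the project no longer needs. For orientation, the timing macro pair being unsuppressed is used as in `PARSER_TIMESTAMP_END(AddFmkNodeDefToMap, "TensorFlowModelParser::AddFmkNodeDefToMap")` later in this changeset; a self-contained analogue (simplified names, and the real `PARSER_TIMESTAMP_END` body is not shown in this diff) looks like:

```cpp
#include <chrono>
#include <cstdint>
#include <cstdio>

// Self-contained analogue of the parser's timing macros.
static uint64_t NowUsec() {
  using namespace std::chrono;
  return static_cast<uint64_t>(
      duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count());
}

#define DEMO_TIMESTAMP_START(stage) uint64_t startUsec_##stage = NowUsec()
#define DEMO_TIMESTAMP_END(stage, stage_name)                  \
  std::printf("%s cost %llu usec\n", (stage_name),             \
              static_cast<unsigned long long>(NowUsec() - startUsec_##stage))

int main() {
  DEMO_TIMESTAMP_START(Parse);
  // ... work being timed ...
  DEMO_TIMESTAMP_END(Parse, "demo::Parse");
  return 0;
}
```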
```diff
@@ -21,7 +21,6 @@
 #include <string>
 #include <vector>

-/*lint -e1073*/
 namespace ge {
 // the operator type mapping table of caffe and mindspore
 extern std::map<std::string, std::string> caffe_op_map;
```
```diff
@@ -41,5 +40,4 @@ extern std::vector<std::string> is_dataset_op_vec;
 // output tensor num
 extern std::map<std::string, int32_t> op_output_tensor_num;
 }  // namespace ge
-/*lint +e1073*/
 #endif  // GE_COMMON_OP_MAP_H_
```
```diff
@@ -101,7 +101,7 @@ class OpParserFactory {
    * @ingroup domi_omg
    * @brief Each Op corresponds to a Creator function
    */
-  std::map<std::string, CREATOR_FUN> op_parser_creator_map_;  // lint !e1073
+  std::map<std::string, CREATOR_FUN> op_parser_creator_map_;
   std::map<std::string, CREATOR_FUN> fusion_op_parser_creator_map_;

   friend class OpParserRegisterar;
```
```diff
@@ -675,8 +675,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
   uint64_t m_tmp;
   if (e_a > e_b) {
     m_tmp = m_a;
-    uint16_t tmp;
-    tmp = e_a - e_b;
+    uint16_t tmp = e_a - e_b;
     for (int i = 0; i < tmp; i++) {
       m_tmp = m_tmp << 1;
     }
```
```diff
@@ -690,8 +689,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
     m_b = m_tmp;
   }
   m_div = static_cast<float>(m_a * 1.0f / m_b);
-  fp16_t fp_div;
-  fp_div = m_div;
+  fp16_t fp_div = m_div;
   ret = fp_div.val;
   if (s_a != s_b) {
     ret |= kFp16SignMask;
```
```diff
@@ -212,7 +212,7 @@ Status ParserUtils::HandleInputContext(const NodePtr &node,
       // add control edge
       if (node->GetInControlAnchor() != nullptr) {
         for (const auto &out_anchor : node->GetInControlAnchor()->GetPeerAnchors()) {
-          graphStatus ret = GraphUtils::AddEdge(out_anchor, peer_in_anchor->GetOwnerNode()->GetInControlAnchor());
+          ret = GraphUtils::AddEdge(out_anchor, peer_in_anchor->GetOwnerNode()->GetInControlAnchor());
           if (ret != GRAPH_SUCCESS) {
             REPORT_CALL_ERROR("E19999", "add control edge from %s to %s failed.",
                               out_anchor->GetOwnerNode()->GetName().c_str(),
```
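This hunk, like the matching ones in `ParseAllGraph` and `ParseProto` below, removes a local status redeclaration and assigns to the `ret` already in scope. The redeclaration shadows the outer variable, so any check of the outer `ret` after the block would see a stale value. A self-contained sketch of the hazard with invented names, not the parser's API:

```cpp
#include <cstdio>

using Status = int;
constexpr Status SUCCESS = 0;
constexpr Status FAILED = 1;

Status DoStep(int i) { return i == 2 ? FAILED : SUCCESS; }  // hypothetical step

int main() {
  Status ret = SUCCESS;
  for (int i = 0; i < 3; ++i) {
    // Buggy: `Status ret = DoStep(i);` here would shadow the outer `ret`,
    // making the failure at i == 2 invisible after the loop.
    ret = DoStep(i);  // fixed form: assign to the enclosing variable
    if (ret != SUCCESS) break;
  }
  std::printf("final ret = %d\n", ret);  // prints 1: the failure is preserved
  return 0;
}
```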
```diff
@@ -767,8 +767,8 @@ Status OnnxModelParser::AdaptAndFindAllOnnxGraph(ge::onnx::GraphProto &root_onnx
     return FAILED;
   }

-  for (const auto &onnx_graph : onnx_graphs) {
-    onnx_graph_tasks.push(onnx_graph);
+  for (const auto &onnx_graph_task : onnx_graphs) {
+    onnx_graph_tasks.push(onnx_graph_task);
   }
   for (const auto &itr : name_to_onnx_subgraph) {
     name_to_onnx_graph.emplace(itr.first, itr.second);
```
```diff
@@ -77,7 +77,7 @@ protected:
   void RegisterCreator(const std::string &type, CREATOR_FUN fun);

 private:
-  std::map<std::string, CREATOR_FUN> subgraph_adapter_creator_map_;  // lint !e1073
+  std::map<std::string, CREATOR_FUN> subgraph_adapter_creator_map_;

   friend class SubgraphAdapterRegisterar;
 };
```
```diff
@@ -858,7 +858,6 @@ Status CreateNodeDefBytes(ge::NodePtr n, string originalType, map<string, PIOLis
   // Set size
   for (auto ge_desc : opDesc->GetAllOutputsDescPtr()) {
     int64_t real_size = 1;
-    int64_t tmp_dim = 0;

     auto data_type = ge_desc->GetDataType();
     uint32_t size_type = 1;
```
```diff
@@ -873,7 +872,7 @@ Status CreateNodeDefBytes(ge::NodePtr n, string originalType, map<string, PIOLis
     // calculate size
     for (uint32_t j = 0; j < ge_desc->GetShape().GetDimNum(); ++j) {
-      tmp_dim = ge_desc->GetShape().GetDim(j);
+      int64_t tmp_dim = ge_desc->GetShape().GetDim(j);
       GE_CHECK_GE(tmp_dim, 0);
       PARSER_INT64_MULCHECK(real_size, tmp_dim);
       real_size *= tmp_dim;
```
```diff
@@ -1279,8 +1278,7 @@ Status CreateFuncDefBytes(ge::NodePtr n, string original_type, string func_bin_p
   GELOGI("len =%d\n", len);

-  ge::GeAttrValue::BYTES funcDefBytes;
-  funcDefBytes = ge::Buffer::CopyFrom((std::uint8_t *)buf, len);
+  ge::GeAttrValue::BYTES funcDefBytes = ge::Buffer::CopyFrom((std::uint8_t *)buf, len);
   (void)ge::AttrUtils::SetBytes(opDesc, ge::ATTR_NAME_FRAMEWORK_FUNC_DEF, funcDefBytes);
   GELOGI("funcDefBytes.GetSize() =%zu", funcDefBytes.GetSize());
```
```diff
@@ -55,7 +55,7 @@ class ParserGraphOptimizer {
   const bool GetLocalFmkopFlag() const { return local_fmk_op_flag_; }

-  void SetFuncBinPath(std::string isFuncBinPath) { func_bin_path_ = isFuncBinPath; }
+  void SetFuncBinPath(const std::string &isFuncBinPath) { func_bin_path_ = isFuncBinPath; }
   const std::string GetFuncBinPath() const { return func_bin_path_; }

   domi::Status InsertHWCK2FZ(ge::OutDataAnchorPtr src_anchor, ge::InDataAnchorPtr dst_anchor,
```
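`SetFuncBinPath` now takes `const std::string &` instead of `std::string` by value, so callers passing an existing string no longer pay for a copy at the call boundary; the only copy is the assignment into the member. A minimal sketch with a hypothetical class:

```cpp
#include <string>

class PathHolder {
 public:
  // By value: the argument itself is a fresh copy on every call,
  // then the assignment copies again.
  void SetByValue(std::string p) { path_ = p; }

  // By const reference (the pattern adopted in the diff): binds directly to
  // the caller's string; the only copy is the assignment into the member.
  void SetByConstRef(const std::string &p) { path_ = p; }

 private:
  std::string path_;
};

int main() {
  PathHolder h;
  std::string bin_path = "/opt/func.bin";  // hypothetical path
  h.SetByConstRef(bin_path);               // one copy total
  return 0;
}
```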
```diff
@@ -1528,7 +1528,7 @@ Status TensorFlowModelParser::ParseAllGraph(const google::protobuf::Message *pro
     if (tensorflow_op_map.find(node_op) == tensorflow_op_map.end()) {
       GELOGW("%s not found in tensorflow_op_map.", node_op.c_str());
     }
-    Status ret = AddNode(node_def, graph, scope_graph);
+    ret = AddNode(node_def, graph, scope_graph);
     if (ret != SUCCESS) {
       GELOGE(ret, "Add op[%s] failed", node_def->name().c_str());
       DeleteFuisonNodeDef();
```
```diff
@@ -1688,7 +1688,6 @@ Status TensorFlowModelParser::CheckInputNodeName(const string &input_node_name,
     }
   }

-  int32_t tmp_index = 0;
   auto find = tmp_input_node_name.find(":");
   if (find == string::npos) {
     *node_name = tmp_input_node_name;
@@ -1696,6 +1695,7 @@ Status TensorFlowModelParser::CheckInputNodeName(const string &input_node_name,
     if (index == nullptr) {
       return SUCCESS;
     }
+    int32_t tmp_index = 0;
     *index = tmp_index;

     return SUCCESS;
```
```diff
@@ -2060,15 +2060,14 @@ Status TensorFlowModelParser::EraseNormalOpOutputIfChild(shared_ptr<ge::ScopeGra
   for (auto iter = normal_op_node_context.output_map.begin(); iter != normal_op_node_context.output_map.end();) {
     string output_node_name = iter->first;
     ge::ScopeFusionOpInfo to_info;
-    int32_t from_index = 0;
-    int32_t to_index = 0;
     if (IsFusionOpChild(output_node_name, &to_info) &&
         nodedef_map_[output_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) {
       // Fuse operator, update index
       std::vector<std::pair<int32_t, int32_t>> &pairs = iter->second;
       for (auto &pair : pairs) {
-        from_index = pair.first;
+        int32_t from_index = pair.first;
+        int32_t to_index = 0;
         GE_RETURN_WITH_LOG_IF_ERROR(GetInPutIndex(scope_graph, to_info, pair.second, to_index),
                                     "GetInPutIndex failed ,output_node_name %s.", output_node_name.c_str());
         tmp_output_map[to_info.fusion_node_name].push_back({from_index, to_index});
```
```diff
@@ -2097,15 +2096,14 @@ Status TensorFlowModelParser::UpdateNormalOpContext(shared_ptr<ge::ScopeGraph> &
   for (auto iter = normal_op_node_context.input_map.begin(); iter != normal_op_node_context.input_map.end();) {
     string input_node_name = iter->first;
     ge::ScopeFusionOpInfo from_info;
-    int32_t from_index = 0;
-    int32_t to_index = 0;
     if (IsFusionOpChild(input_node_name, &from_info) &&
         nodedef_map_[input_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) {
       // Fuse operator, update index
       std::vector<std::pair<int32_t, int32_t>> &pairs = iter->second;
       for (auto &pair : pairs) {
-        to_index = pair.second;
+        int32_t from_index = 0;
+        int32_t to_index = pair.second;
         GE_RETURN_WITH_LOG_IF_ERROR(GetOutPutIndex(scope_graph, from_info, pair.first, from_index),
                                     "GetOutPutIndex failed ,input_node_name %s.", input_node_name.c_str());
         tmp_input_map[from_info.fusion_node_name].push_back({from_index, to_index});
```
```diff
@@ -2325,7 +2323,7 @@ Status TensorFlowModelParser::ParseProto(const google::protobuf::Message *proto,
     }

     // Do not exit immediately when there is an error, wait until all errors are collected before exiting
-    Status ret = AddFmkNodeDefToMap(*graph_def, node_def, op_node_name_list);
+    ret = AddFmkNodeDefToMap(*graph_def, node_def, op_node_name_list);
     GE_CHK_STATUS_EXEC(ret, return PARAM_INVALID, "add node_def to map failed");
   }
   PARSER_TIMESTAMP_END(AddFmkNodeDefToMap, "TensorFlowModelParser::AddFmkNodeDefToMap");
```
```diff
@@ -3180,8 +3178,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef
   output_graph_def->Clear();
   for (const NodeDef &node : filtered_graph_def.node()) {
     if (input_nodes.count(node.name())) {
-      NodeDef placeholder_node;
-      placeholder_node = node;
+      NodeDef placeholder_node = node;
       placeholder_node.clear_input();
       GE_IF_BOOL_EXEC(node.op() != "Placeholder", placeholder_node.set_op("Placeholder"));
       domi::tensorflow::AttrValue attr_value;
```
```diff
@@ -3778,8 +3775,8 @@ void TensorFlowModelParser::UpdateInnerInputMap(const string &fusion_op_name, Op
   std::map<std::string, std::vector<std::pair<int32_t, int32_t>>> tmp_input_map;
   for (auto iter = op_node_context.input_map.begin(); iter != op_node_context.input_map.end();) {
     string src_name = iter->first;
-    std::vector<std::pair<int32_t, int32_t>> &input_idx = iter->second;
     if (src_name == ge::kInputFromFusionScope) {
+      std::vector<std::pair<int32_t, int32_t>> &input_idx = iter->second;
       for (const auto &in_pair : input_idx) {
         if (in_pair.second != kControlSlot) {
           auto data = remap_data_input[fusion_op_name + std::to_string(in_pair.first)];
```
```diff
@@ -3825,8 +3822,8 @@ void TensorFlowModelParser::UpdateInnerOutputMap(const string &fusion_op_name, O
   std::map<std::string, std::vector<std::pair<int32_t, int32_t>>> tmp_output_map;
   for (auto iter = op_node_context.output_map.begin(); iter != op_node_context.output_map.end();) {
     string dst_name = iter->first;
-    std::vector<std::pair<int32_t, int32_t>> &output_idx = iter->second;
     if (dst_name == ge::kOutputToFusionScope) {
+      std::vector<std::pair<int32_t, int32_t>> &output_idx = iter->second;
       for (const auto &out_pair : output_idx) {
         if (out_pair.second != kControlSlot) {
           auto data_outputs = remap_data_output[fusion_op_name + std::to_string(out_pair.second)];
```
```diff
@@ -34,7 +34,6 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
                     "parse ge_desc failed.");
   uint32_t size_type = 1;
   int64_t real_size = 1;
-  int64_t tmp_dim = 0;

   auto data_type = ge_desc.GetDataType();
   bool type_ret = ge::TypeUtils::GetDataTypeLength(data_type, size_type);
@@ -46,7 +45,7 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
                   return PARAM_INVALID);
   // calculate size
   for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
-    tmp_dim = ge_desc.GetShape().GetDim(j);
+    int64_t tmp_dim = ge_desc.GetShape().GetDim(j);
     GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
     real_size *= tmp_dim;
   }
```
```diff
@@ -38,7 +38,6 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
                     "parse ge_desc failed.");
   uint32_t size_type;
   int64_t real_size = 1;
-  int64_t tmp_dim = 0;

   auto data_type = ge_desc.GetDataType();
   bool type_ret = ge::TypeUtils::GetDataTypeLength(data_type, size_type);
@@ -50,7 +49,7 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
                   return domi::PARAM_INVALID);
   // calculate size
   for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
-    tmp_dim = ge_desc.GetShape().GetDim(j);
+    int64_t tmp_dim = ge_desc.GetShape().GetDim(j);
     GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
     PARSER_INT64_MULCHECK(real_size, tmp_dim);
     real_size *= tmp_dim;
```
```diff
@@ -179,14 +179,13 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY domi::Status TensorFlowUtil::Tr
                          "parse ge_desc failed.");
   uint32_t size_type = 1;
-  int64_t tmp_dim = 0;

   auto data_type = ge_desc.GetDataType();
   GE_CHK_BOOL_RET_STATUS(ge::TypeUtils::GetDataTypeLength(data_type, size_type), PARAM_INVALID,
                          "dataType no define size , parse ge_desc failed.");
   // get size
   for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
-    tmp_dim = ge_desc.GetShape().GetDim(j);
+    int64_t tmp_dim = ge_desc.GetShape().GetDim(j);
     // The shape inferred by fusedbatchnormgrad and mean calling tensorflow is not accurate.
     // Here, special treatment is given to the two operators.
```
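All of the `tmp_dim` hunks follow one pattern: the variable is fully overwritten on each iteration, so its declaration moves from function scope into the loop. A self-contained sketch of the size computation these loops perform, with an explicit guard standing in for `PARSER_INT64_MULCHECK` (whose real body is not shown in this diff) and hypothetical shape values:

```cpp
#include <cstdint>
#include <cstdio>
#include <limits>
#include <vector>

int main() {
  const std::vector<int64_t> dims = {4, 224, 224, 3};  // hypothetical NHWC shape
  const uint32_t size_type = 4;                        // e.g. 4 bytes for float32
  int64_t real_size = 1;
  for (size_t j = 0; j < dims.size(); ++j) {
    int64_t tmp_dim = dims[j];  // loop-local, as in the refactored code
    // Stand-in for PARSER_INT64_MULCHECK: bail out before the multiply overflows.
    if (tmp_dim != 0 && real_size > std::numeric_limits<int64_t>::max() / tmp_dim) {
      std::fprintf(stderr, "int64 multiplication overflow\n");
      return 1;
    }
    real_size *= tmp_dim;
  }
  std::printf("tensor bytes = %lld\n",
              static_cast<long long>(real_size * size_type));
  return 0;
}
```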