
update parser code from yellow zone

taoxiangdong committed 5 years ago (parent commit f504f607d9, branch pull/3/head)
30 changed files with 1430 additions and 1382 deletions
  1. +32   -0    inc/external/caffe_parser.h
  2. +33   -0    inc/external/tensorflow_parser.h
  3. +4    -3    parser/caffe/caffe_custom_parser_adapter.cc
  4. +62   -148  parser/caffe/caffe_parser.cc
  5. +4    -6    parser/caffe/caffe_parser.h
  6. +2    -2    parser/caffe/caffe_reshape_parser.cc
  7. +190  -190  parser/caffe/proto/ge_ir.proto
  8. +72   -0    parser/common/acl_graph_parser_util.h
  9. +4    -4    parser/common/data_op_parser.cc
  10. +4    -4    parser/common/op_parser_factory.h
  11. +190  -190  parser/common/proto/ge_ir.proto
  12. +136  -136  parser/common/proto/insert_op.proto
  13. +3    -3    parser/common/register_tbe.cc
  14. +2    -2    parser/common/thread_pool.h
  15. +16   -16   parser/func_to_graph/proto_python_rule.mk
  16. +0    -1    parser/onnx/module.mk
  17. +1    -1    parser/onnx/onnx_constant_parser.cc
  18. +190  -190  parser/proto/ge_ir.proto
  19. +136  -136  parser/proto/insert_op.proto
  20. +2    -2    parser/tensorflow/graph_optimizer.cc
  21. +190  -190  parser/tensorflow/proto/ge_ir.proto
  22. +136  -136  parser/tensorflow/proto/insert_op.proto
  23. +2    -2    parser/tensorflow/scope/scope_pass_manager.cc
  24. +2    -2    parser/tensorflow/tensorflow_constant_parser.cc
  25. +2    -2    parser/tensorflow/tensorflow_fusion_op_parser.cc
  26. +7    -7    parser/tensorflow/tensorflow_parser.cc
  27. +3    -3    parser/tensorflow/tensorflow_parser_register.h
  28. +2    -2    parser/tensorflow/tensorflow_reshape_parser.cc
  29. +3    -3    parser/tensorflow/tensorflow_squeeze_parser.cc
  30. +0    -1    parser/tensorflow/tensorflow_util.cc

+32 -0  inc/external/caffe_parser.h

@@ -0,0 +1,32 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef INC_EXTERNAL_ACL_GRAPH_CAFFE_H_
#define INC_EXTERNAL_ACL_GRAPH_CAFFE_H_

#include <memory>
#include <string>
#include <vector>

#include "graph/ge_error_codes.h"
#include "graph/types.h"
#include "graph/graph.h"

namespace ge {
graphStatus aclgrphParseCaffe(const char *model_file, const char *weights_file, ge::Graph &graph);
} // namespace ge

#endif // INC_EXTERNAL_ACL_GRAPH_CAFFE_H_
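
For context, the new header reduces the whole Caffe front end to a single call. A minimal usage sketch (file names are placeholders; assumes the parser library is linked):

#include "external/caffe_parser.h"

int main() {
  ge::Graph graph("caffe_graph");
  // One call parses the network definition plus weights into the graph.
  ge::graphStatus ret = ge::aclgrphParseCaffe("deploy.prototxt", "weights.caffemodel", graph);
  return (ret == ge::GRAPH_SUCCESS) ? 0 : 1;
}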

+33 -0  inc/external/tensorflow_parser.h

@@ -0,0 +1,33 @@
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef INC_EXTERNAL_ACL_PARSER_TENSORFLOW_H_
#define INC_EXTERNAL_ACL_PARSER_TENSORFLOW_H_

#include <atomic>
#include <memory>
#include <string>
#include <vector>

#include "graph/ge_error_codes.h"
#include "graph/types.h"
#include "graph/graph.h"

namespace ge {
graphStatus aclgrphParseTensorFlow(const char *model_file, ge::Graph &graph);
} // namespace ge

#endif // INC_EXTERNAL_ACL_PARSER_TENSORFLOW_H_
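
The TensorFlow entry point is symmetrical; a sketch under the same assumptions (the .pb path is a placeholder, and the input is presumably a serialized frozen GraphDef):

#include "external/tensorflow_parser.h"

int main() {
  ge::Graph graph("tf_graph");
  // Parse a TensorFlow model file into the GE graph.
  ge::graphStatus ret = ge::aclgrphParseTensorFlow("frozen_model.pb", graph);
  return (ret == ge::GRAPH_SUCCESS) ? 0 : 1;
}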

+4 -3  parser/caffe/caffe_custom_parser_adapter.cc

@@ -18,7 +18,7 @@
#include <memory>
#include <vector>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "framework/common/debug/ge_log.h"
#include "framework/omg/omg_inner_types.h"
@@ -83,9 +83,10 @@ Status CaffeCustomParserAdapter::ParseWeights(const Message *op_src, ge::NodePtr
}

bool bias_en = false;
bool update_in_turn = (static_cast<int64_t >(op->GetAllInputsSize()) == (layer->bottom_size() + layer->blobs_size()));
int start_pos = layer->bottom_size();
for (int i = 0; i < layer->blobs_size(); ++i) {
ge::GeTensorPtr weight = ge::MakeShared<ge::GeTensor>();
ge::GeTensorPtr weight = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(weight);
GE_CHK_STATUS_RET(ConvertWeight(layer->blobs(i), layer->name(), weight), "Convert blobs(%d) for layer %s failed", i,
layer->name().c_str());
@@ -120,7 +121,7 @@ Status CaffeCustomParserAdapter::ParseWeights(const Message *op_src, ge::NodePtr
GE_CHECK_NOTNULL(const_node);
auto index = start_pos + i;
auto valid_input_name = op->GetValidInputNameByIndex(static_cast<uint32_t>(index));
if (valid_input_name.empty()) {
if (update_in_turn || valid_input_name.empty()) {
if (node->AddLinkFrom(static_cast<const uint32_t &>(index), const_node) != GRAPH_SUCCESS) {
GELOGE(GRAPH_FAILED, "AddEdge failed of from Node %s output to Node %s input %d", const_node->GetName().c_str(),
node->GetName().c_str(), index);
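
The key change above is the update_in_turn switch: when the op's declared input count equals bottom_size() + blobs_size(), weight blobs are wired positionally; otherwise the destination slot is resolved by valid input name. A toy sketch of that decision (standalone, not the real GE API):

#include <cstdint>

// Returns the positional input slot for the i-th weight blob, or -1 when the
// caller should fall back to name-based lookup (GetValidInputNameByIndex).
int WeightInputSlot(int64_t all_inputs, int bottoms, int blobs, int i) {
  const bool update_in_turn = (all_inputs == bottoms + blobs);
  return update_in_turn ? (bottoms + i) : -1;
}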


+62 -148  parser/caffe/caffe_parser.cc

@@ -22,7 +22,7 @@
#include <memory>
#include "parser/common/convert/pb2json.h"
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op_map.h"
#include "common/util/error_manager/error_manager.h"
#include "common/ge_types.h"
@@ -85,7 +85,7 @@ graphStatus aclgrphParseCaffe(const char *model_file, const char *weights_file,
(void)acl_graph_parse_util.AclParserInitialize(options);

// Create an empty computegraph
ge::ComputeGraphPtr compute_graph = ge::MakeShared<ge::ComputeGraph>("tmpGraph");
ge::ComputeGraphPtr compute_graph = ge::parser::MakeShared<ge::ComputeGraph>("tmpGraph");
GE_CHECK_NOTNULL(compute_graph);

graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph);
@@ -107,6 +107,10 @@ graphStatus aclgrphParseCaffe(const char *model_file, const char *weights_file,
return ret;
}
GELOGI("Weights parse success. graph: %s", graph.GetName().c_str());
if (acl_graph_parse_util.SetDefaultOutputNode(graph) != ge::SUCCESS) {
GELOGE(ret, "Set graph %s default output node failed.", graph.GetName().c_str());
return ge::FAILED;
}
return ge::SUCCESS;
}
} // namespace ge
@@ -803,10 +807,6 @@ Status CaffeModelParser::ParseOutputNodeTopInfo(const domi::caffe::NetParameter

Status CaffeModelParser::AddBlobsToMap(const domi::caffe::LayerParameter &layer,
std::map<std::string, std::string> &inplace_blob_name_remapping) {
if (layer.type() == ge::parser::NETOUTPUT) {
return SUCCESS;
}

if (layer.top_size() <= 0) {
ErrorManager::GetInstance().ATCReportErrMessage("E19011", {"opname"}, {layer.name()});
GELOGE(FAILED, "The output size of layer %s needs to be greater than zero.", layer.name().c_str());
@@ -1085,41 +1085,13 @@ Status CaffeModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const dom
"while it's original input num is: %d",
layer.bottom_size());
}

// Netoutput node processing
if (op_desc->GetType() == ge::parser::NETOUTPUT) {
size_t input_output_tensor_num = 0;
if (!ge::GetParserContext().user_out_nodes.empty()) {
// User specified output
input_output_tensor_num = ge::GetParserContext().user_out_nodes.size();
} else {
for (auto t_iter = top_blobs_map_.begin(); t_iter != top_blobs_map_.end(); t_iter++) {
auto b_iter = bottom_blobs_map_.find(t_iter->first);
// Find the output node of the network
if (b_iter == bottom_blobs_map_.end()) {
input_output_tensor_num += top_blobs_map_[t_iter->first].size();
}
}
}
// add tensordesc
GELOGD(
"Current op type is NETOUTPUT, add additional input&output num: %zu."
"while it's original input num is: %d, output num is: %d",
input_output_tensor_num, layer.bottom_size(), output_tensor_num);
for (size_t i = 0; i < input_output_tensor_num; i++) {
ge::GeTensorDesc input_tensor;
GE_RETURN_IF_ERROR(op_desc->AddInputDesc(input_tensor));
ge::GeTensorDesc output_tensor;
GE_RETURN_IF_ERROR(op_desc->AddOutputDesc(output_tensor));
}
}
return SUCCESS;
}

Status CaffeModelParser::AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer,
const string &op_type) {
if (std::find(kAddTensorIrSkipNodes.begin(), kAddTensorIrSkipNodes.end(), op_type) != kAddTensorIrSkipNodes.end()) {
op_desc = ge::MakeShared<ge::OpDesc>(layer.name(), op_type);
op_desc = ge::parser::MakeShared<ge::OpDesc>(layer.name(), op_type);
GE_CHECK_NOTNULL(op_desc);
Status ret = AddTensorDescToOpDesc(op_desc, layer);
if (ret != SUCCESS) {
@@ -1138,44 +1110,44 @@ Status CaffeModelParser::AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const
} else {
op_desc = ge::OpDescUtils::GetOpDescFromOperator(op_factory);
GE_CHECK_NOTNULL(op_desc);
auto valid_size = layer.bottom_size();
GELOGI("After GetOpDescFromOperator op[%s] type[%s] have all input size: %zu, caffe_input_size:%d output size: %zu",
auto valid_input_size = layer.bottom_size();
auto blob_size = layer.blobs_size();
GELOGI("After GetOpDescFromOperator op[%s] type[%s] have all input size: %zu, "
"caffe_input_size:%d blob_size %d output size: %zu",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
op_desc->GetAllInputsSize(), valid_size, op_desc->GetOutputsSize());
for (int i = 0; i < valid_size; i++) {
op_desc->GetAllInputsSize(), valid_input_size,
blob_size, op_desc->GetOutputsSize());
bool update_in_turn = (static_cast<int64_t >(op_desc->GetAllInputsSize()) == (valid_input_size + blob_size));
for (int i = 0; i < valid_input_size; i++) {
ge::GeTensorDesc input_tensor;
std::string input_name;
ge::graphStatus ret = ge::GRAPH_SUCCESS;
// Only two cases are supported for now when there are optional inputs
// Below cases are supported for now when there are optional inputs
// x means optional, o means required input
// a. ooxxx, layer.bottom_size=number of o and x
// b. oxoxoxox, layer.bottom_size=number of o
if (static_cast<size_t>(i) >= op_desc->GetInputsSize()) {
ret = op_desc->UpdateInputDesc(static_cast<uint32_t>(i), input_tensor);
// a. ooxxx, number of o and x>=layer.bottom_size+layer.blobs_size>=number of o
// b. oxoxoxox, layer.bottom_size+layer.blobs_size=number of o
// c. oxoxoxox, layer.bottom_size+layer.blobs_size=number of o and x
if (update_in_turn) {
ret = op_desc->UpdateInputDesc(op_desc->GetInputNameByIndex(static_cast<uint32_t>(i)), input_tensor);
} else {
input_name = op_desc->GetValidInputNameByIndex(static_cast<uint32_t>(i));
ret = op_desc->UpdateInputDesc(input_name, input_tensor);
}

if (ret != ge::GRAPH_SUCCESS) {
GELOGW("op [%s], type[%s], update input(%d) with name %s failed", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, input_name.c_str());
} else {
GELOGI("op [%s], type[%s], update input(%d) with name %s success", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, input_name.c_str());
if (static_cast<size_t>(i) >= op_desc->GetInputsSize()) {
ret = op_desc->UpdateInputDesc(static_cast<uint32_t>(i), input_tensor);
} else {
input_name = op_desc->GetValidInputNameByIndex(static_cast<uint32_t>(i));
ret = op_desc->UpdateInputDesc(input_name, input_tensor);
}
}
GELOGI("op [%s], type[%s], update input(%d) with name %s %s", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, input_name.c_str(), ret == ge::GRAPH_SUCCESS ? "success" : "failed");
}

for (int i = 0; i < layer.top_size(); i++) {
ge::GeTensorDesc output_tensor;
ge::graphStatus ret = op_desc->UpdateOutputDesc(op_desc->GetOutputNameByIndex(i), output_tensor);
if (ret != ge::GRAPH_SUCCESS) {
GELOGW("op [%s], type[%s], update output(%d) with name %s failed", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, op_desc->GetOutputNameByIndex(i).c_str());
} else {
GELOGI("op [%s], type[%s], update output(%d) with name %s success", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, op_desc->GetOutputNameByIndex(i).c_str());
}
auto ret = op_desc->UpdateOutputDesc(op_desc->GetOutputNameByIndex(static_cast<uint32_t>(i)), output_tensor);
GELOGI("op [%s], type[%s], update output(%d) with name %s %s",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
i, op_desc->GetOutputNameByIndex(i).c_str(),
ret == ge::GRAPH_SUCCESS ? "success" : "failed");
}
}
return SUCCESS;
@@ -1226,6 +1198,12 @@ Status CaffeModelParser::AddEdges(ge::ComputeGraphPtr &graph) {
GELOGE(INTERNAL_ERROR, "Add link failed from op[%s] to op[%s].",
top_node_iter->second->GetName().c_str(), bottom_node_iter->second->GetName().c_str());
return INTERNAL_ERROR;);
auto op_desc = bottom_node_iter->second->GetOpDesc();
GE_CHECK_NOTNULL(op_desc);
auto out_op_desc = top_node_iter->second->GetOpDesc();
GE_CHECK_NOTNULL(out_op_desc);
(void) op_desc->UpdateInputDesc((static_cast<uint32_t>(in_archor_ptr->GetIdx())),
out_op_desc->GetOutputDesc(static_cast<uint32_t>(out_archor_ptr->GetIdx())));
}
GE_IF_BOOL_EXEC(top_node_iter == node_map.end(), ErrorManager::GetInstance().ATCReportErrMessage(
"E11014", {"opname"}, {top_blob_layer_pair.first});
@@ -1264,46 +1242,33 @@ bool CaffeModelParser::IsOutputTop(const string &op_name, const int32_t index) {
return ret;
}

Status CaffeModelParser::AddEdgeForUserOutNodes(ge::ComputeGraphPtr &graph) {
GE_CHECK_NOTNULL(graph);
ge::NodePtr net_output_node = graph->FindFirstNodeMatchType(ge::parser::NETOUTPUT);
if (net_output_node == nullptr) {
GELOGE(INTERNAL_ERROR, "Can not find netoutput node.");
return INTERNAL_ERROR;
}
uint32_t net_output_num = net_output_node->GetAllInDataAnchorsSize();
Status CaffeModelParser::AddUserOutNodesTop() {
int32_t index = 0;
const std::vector<std::pair<std::string, int32_t>> &user_out_nodes = ge::GetParserContext().user_out_nodes;
int net_output_num = user_out_nodes.size();
for (const auto &out_pair : user_out_nodes) {
auto node_iter = node_map.find(out_pair.first);
auto layer_iter = layer_tops_map_.find(out_pair.first);
GELOGI("Add to output, node name: %s", out_pair.first.c_str());
if (node_iter != node_map.end()) {
if ((static_cast<uint32_t>(out_pair.second) >= node_iter->second->GetAllOutDataAnchorsSize()) ||
(static_cast<uint32_t>(index) >= net_output_num)) {
if (layer_iter != layer_tops_map_.end()) {
if (static_cast<uint32_t>(out_pair.second) >= (layer_iter->second).size()) {
ErrorManager::GetInstance().ATCReportErrMessage(
"E11016", {"opname", "outputindex", "totlaloutputindex", "inputindex", "totlalinputindex"},
{out_pair.first.c_str(), std::to_string(out_pair.second),
std::to_string(node_iter->second->GetAllOutDataAnchorsSize()), std::to_string(index),
std::to_string((layer_iter->second).size()), std::to_string(index),
std::to_string(net_output_num)});
GELOGE(INTERNAL_ERROR,
"Add op %s to NetOutput faild, current node output index:%d should < %u. NetOutput"
"input_index:%d should < %u.",
out_pair.first.c_str(), out_pair.second, node_iter->second->GetAllOutDataAnchorsSize(), index,
out_pair.first.c_str(), out_pair.second, (layer_iter->second).size(), index,
net_output_num);
return INTERNAL_ERROR;
}
GELOGD("Start add edge for user out node: From %s:%d To %s:%d.", node_iter->second->GetName().c_str(),
out_pair.second, net_output_node->GetName().c_str(), index);
ge::OutDataAnchorPtr out_archor_ptr = node_iter->second->GetOutDataAnchor(out_pair.second);
GE_CHECK_NOTNULL(out_archor_ptr);
ge::InDataAnchorPtr in_archor_ptr = net_output_node->GetInDataAnchor(index);
GE_CHECK_NOTNULL(in_archor_ptr);
if (ge::GraphUtils::AddEdge(out_archor_ptr, in_archor_ptr) != ge::GRAPH_SUCCESS) {
ErrorManager::GetInstance().ATCReportErrMessage("E11013", {"opname1", "opname2"},
{node_iter->second->GetName(), net_output_node->GetName()});
GELOGE(INTERNAL_ERROR, "Add link failed from op[%s] to op[%s].", node_iter->second->GetName().c_str(),
net_output_node->GetName().c_str());
return INTERNAL_ERROR;

string top_name = layer_iter->second[out_pair.second];
auto top_node_iter = node_map.find(out_pair.first);
if (top_node_iter != node_map.end()) {
ge::GetParserContext().out_top_names.push_back(top_name);
GELOGI("The top of out node [%s] is [%s]", out_pair.first.c_str(), top_name.c_str());
}
++index;
} else {
@@ -1315,13 +1280,7 @@ Status CaffeModelParser::AddEdgeForUserOutNodes(ge::ComputeGraphPtr &graph) {
return SUCCESS;
}

Status CaffeModelParser::AddEdge4Output(const domi::caffe::NetParameter &proto_message, ge::ComputeGraphPtr &graph) {
GE_CHECK_NOTNULL(graph);
ge::NodePtr node = graph->FindFirstNodeMatchType(ge::parser::NETOUTPUT);

GE_RETURN_WITH_LOG_IF_FALSE(node != nullptr, "Net without output, some phase failed in front.");

int32_t index = 0;
Status CaffeModelParser::AddOutputTop(const domi::caffe::NetParameter &proto_message) {
for (int32_t i = 0; i < proto_message.layer_size(); i++) {
const domi::caffe::LayerParameter &layer = proto_message.layer(i);

@@ -1331,6 +1290,7 @@ Status CaffeModelParser::AddEdge4Output(const domi::caffe::NetParameter &proto_m

for (int i = 0; i < layer.top_size(); i++) {
string top = layer.top(i);
string top_origin = top;
// Handling 'inplace' scenarios
if (IsInplaceTopBlob(layer, top)) {
top = RemapTopNameByLayer(layer, top, i);
@@ -1352,21 +1312,9 @@ Status CaffeModelParser::AddEdge4Output(const domi::caffe::NetParameter &proto_m
auto top_node_iter = node_map.find(layer.name());
GELOGI("output in top_blob: %s", layer.name().c_str());
if (top_node_iter != node_map.end()) {
// add edge
// Output node, output index, input node, input index
GELOGD("Start add edge for out node: From %s:%d To %s:%d.", top_node_iter->second->GetName().c_str(), i,
node->GetName().c_str(), index);
ge::OutDataAnchorPtr out_archor_ptr = top_node_iter->second->GetOutDataAnchor(i);
GE_CHECK_NOTNULL(out_archor_ptr);
ge::InDataAnchorPtr in_archor_ptr = node->GetInDataAnchor(index);
GE_CHECK_NOTNULL(in_archor_ptr);
GE_IF_BOOL_EXEC(ge::GraphUtils::AddEdge(out_archor_ptr, in_archor_ptr) != ge::GRAPH_SUCCESS,
ErrorManager::GetInstance().ATCReportErrMessage(
"E11013", {"opname1", "opname2"}, {top_node_iter->second->GetName(), node->GetName()});
GELOGE(INTERNAL_ERROR, "Add link failed from op[%s] to to op[%s].",
top_node_iter->second->GetName().c_str(), node->GetName().c_str());
return INTERNAL_ERROR;);
index++;
ge::GetParserContext().out_top_names.push_back(top_origin);
ge::GetParserContext().default_out_nodes.push_back(std::make_pair(layer.name(), (int32_t)i));
GELOGI("The top of out node [%s] is [%s]", layer.name().c_str(), top_origin.c_str());
}
}
}
@@ -1480,12 +1428,6 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
CHECK_FALSE_EXEC(ParseInput(proto_message, input_data_flag) == SUCCESS, has_error = true;
GELOGE(FAILED, "ParseInput ret fail."));

// build output layer
domi::caffe::LayerParameter *layer = proto_message.add_layer();
GE_CHECK_NOTNULL(layer);
layer->set_name(graph->GetName() + "_" + ge::parser::NODE_NAME_NET_OUTPUT);
layer->set_type(ge::parser::NETOUTPUT);

int32_t layer_count = proto_message.layer_size();
std::map<std::string, std::string> inplace_blob_name_remapping;
// Map of operator name and occurrence times
@@ -1551,9 +1493,9 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
GE_RETURN_WITH_LOG_IF_ERROR(AddEdges(graph), "Caffe parser add edges fail.");

if (!(ge::GetParserContext().user_out_nodes.empty())) {
GE_RETURN_WITH_LOG_IF_ERROR(AddEdgeForUserOutNodes(graph), "Caffe parser add edges for user out nodes failed.");
GE_RETURN_WITH_LOG_IF_ERROR(AddUserOutNodesTop(), "Caffe parser add top_name for user out nodes failed.");
} else {
GE_RETURN_WITH_LOG_IF_ERROR(AddEdge4Output(proto_message, graph), "Caffe parser add edges for output fail.");
GE_RETURN_WITH_LOG_IF_ERROR(AddOutputTop(proto_message), "Caffe parser add top_name for output fail.");
}
GE_RETURN_WITH_LOG_IF_ERROR(graph->TopologicalSorting(), "Caffe parser call graph topo sort fail.");

@@ -1655,12 +1597,6 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
CHECK_FALSE_EXEC(ParseInput(proto_message, input_data_flag) == SUCCESS, has_error = true;
GELOGE(FAILED, "ParseInput ret fail."));

// build output layer
domi::caffe::LayerParameter *layer = proto_message.add_layer();
GE_CHECK_NOTNULL(layer);
layer->set_name(graph->GetName() + "_" + ge::parser::NODE_NAME_NET_OUTPUT);
layer->set_type(ge::parser::NETOUTPUT);

int32_t layer_count = proto_message.layer_size();

if (!ge::GetParserContext().user_out_nodes_top_vec.empty()) {
@@ -1733,12 +1669,11 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
GE_RETURN_WITH_LOG_IF_ERROR(AddEdges(graph), "Caffe parser add edges fail.");

if (!(ge::GetParserContext().user_out_nodes.empty())) {
GE_RETURN_WITH_LOG_IF_ERROR(AddEdgeForUserOutNodes(graph), "Caffe parser add edges for user out nodes failed.");
GE_RETURN_WITH_LOG_IF_ERROR(AddUserOutNodesTop(), "Caffe parser add top_name for user out nodes failed.");
} else {
GE_RETURN_WITH_LOG_IF_ERROR(AddEdge4Output(proto_message, graph), "Caffe parser add edges for output fail.");
GE_RETURN_WITH_LOG_IF_ERROR(AddOutputTop(proto_message), "Caffe parser add top_name for output fail.");
}
GE_RETURN_WITH_LOG_IF_ERROR(graph->TopologicalSorting(), "Caffe parser call graph topo sort fail.");
GE_RETURN_WITH_LOG_IF_ERROR(GetLeafNodeTops(graph), "Caffe parser get out nodes top names failed.");

auto nodes = graph->GetDirectNode();
GELOGI("graph node size = %zu.", nodes.size());
@@ -2449,27 +2384,6 @@ Status CaffeWeightsParser::ConvertNetParameter(const NetParameter &param, ge::Co
return SUCCESS;
}

Status CaffeModelParser::GetLeafNodeTops(ge::ComputeGraphPtr &graph) {
auto netout = graph->FindFirstNodeMatchType(ge::parser::NETOUTPUT);
GE_CHECK_NOTNULL(netout);
for (const auto &in_anchor : netout->GetAllInDataAnchors()) {
auto peer_out_data_anchor = in_anchor->GetPeerOutAnchor();
GE_CHECK_NOTNULL(peer_out_data_anchor);
auto peer_out_data_node = peer_out_data_anchor->GetOwnerNode();
GE_CHECK_NOTNULL(peer_out_data_node);
int idx = peer_out_data_anchor->GetIdx();
string node_name = peer_out_data_node->GetName();
auto layer_iter = layer_tops_map_.find(node_name);
if (layer_iter != layer_tops_map_.end()) {
ge::GetParserContext().out_top_names.push_back(layer_iter->second[idx]);
GELOGI("The top of out node [%s] is [%s]", node_name.c_str(), layer_iter->second[idx].c_str());
} else {
GELOGW("The out node [%s] can not find its top.", node_name.c_str());
}
}
return SUCCESS;
}

Status CaffeModelParser::ParseProto(const google::protobuf::Message *proto, ge::ComputeGraphPtr &graph) {
return SUCCESS;
}
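
Taken together, these hunks replace graph surgery (synthesizing a NetOutput layer and wiring edges to it) with bookkeeping: the parser now records output top names in out_top_names/default_out_nodes and defers output-node creation to SetDefaultOutputNode. The leaf detection behind it is simple; a toy sketch of the idea (not the real member function):

#include <set>
#include <string>
#include <vector>

// A layer's top blob that no other layer consumes as a bottom is a network output.
std::vector<std::string> FindLeafTops(const std::vector<std::string> &all_tops,
                                      const std::set<std::string> &all_bottoms) {
  std::vector<std::string> leaves;
  for (const auto &top : all_tops) {
    if (all_bottoms.count(top) == 0) {
      leaves.push_back(top);
    }
  }
  return leaves;
}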


+4 -6  parser/caffe/caffe_parser.h

@@ -279,12 +279,12 @@ class CaffeModelParser : public domi::ModelParser {

/**
* @ingroup domi_omg
* @brief Add edge information to graph
* @param [in|out] graph graph for saving model information
* @brief Record output top names of the network
* @param [in] proto_message
* @return SUCCESS add successfully
* @return FAILED add failed
*/
Status AddEdge4Output(const domi::caffe::NetParameter &proto_message, ge::ComputeGraphPtr &graph);
Status AddOutputTop(const domi::caffe::NetParameter &proto_message);

/**
* @ingroup domi_omg
@@ -324,7 +324,7 @@ class CaffeModelParser : public domi::ModelParser {
Status AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer,
const string &op_type);

Status AddEdgeForUserOutNodes(ge::ComputeGraphPtr &graph);
Status AddUserOutNodesTop();

std::string RemapTopNameByLayer(const domi::caffe::LayerParameter &layer, const std::string &top_name, int index);

@@ -335,8 +335,6 @@ class CaffeModelParser : public domi::ModelParser {
Status ParseOpParam(const domi::caffe::LayerParameter &layer, ge::OpDescPtr &op,
std::shared_ptr<ge::OpParser> &op_parser);

Status GetLeafNodeTops(ge::ComputeGraphPtr &graph);

void SaveOrigionLayerTops(domi::caffe::LayerParameter &layer);

Status ReorderInput(domi::caffe::NetParameter &net);


+2 -2  parser/caffe/caffe_reshape_parser.cc

@@ -17,7 +17,7 @@
#include "parser/caffe/caffe_reshape_parser.h"
#include <vector>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op/op_parser_util.h"
#include "common/util.h"
#include "framework/common/debug/ge_log.h"
@@ -109,7 +109,7 @@ Status CaffeReshapeParser::AddConstInput(ge::NodePtr &node) {
}

// construct GeTensorPtr
ge::GeTensorPtr constTensor = ge::MakeShared<ge::GeTensor>();
ge::GeTensorPtr constTensor = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(constTensor);
constTensor->SetTensorDesc(const_desc);



+190 -190  parser/caffe/proto/ge_ir.proto

@@ -1,190 +1,190 @@
syntax = "proto3";
package ge.proto;
enum DataType
{
DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set.
DT_FLOAT = 1; // float type
DT_FLOAT16 = 2; // fp16 type
DT_INT8 = 3; // int8 type
DT_UINT8 = 4; // uint8 type
DT_INT16 = 5; // int16 type
DT_UINT16 = 6; // uint16 type
DT_INT32 = 7; //
DT_INT64 = 8; // int64 type
DT_UINT32 = 9; // unsigned int32
DT_UINT64 = 10; // unsigned int64
DT_BOOL = 11; // bool type
DT_DOUBLE = 12; // double type
DT_STRING = 13; // string type
DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */
DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */
DT_COMPLEX64 = 16; // complex64 type
DT_COMPLEX128 = 17; // complex128 type
DT_QINT8 = 18; // qint8 type
DT_QINT16 = 19; // qint16 type
DT_QINT32 = 20; // qint32 type
DT_QUINT8 = 21; // quint8 type
DT_QUINT16 = 22; // quint16 type
DT_RESOURCE = 23; // resource type
DT_STRING_REF = 24; // string_ref type
DT_DUAL = 25; /**< dual output type */
}
message AttrDef
{
message ListValue
{
enum ListValueType{
VT_LIST_NONE = 0;
VT_LIST_STRING = 1;
VT_LIST_INT = 2;
VT_LIST_FLOAT = 3;
VT_LIST_BOOL = 4;
VT_LIST_BYTES = 5;
VT_LIST_TENSOR_DESC = 6;
VT_LIST_TENSOR = 7;
VT_LIST_GRAPH = 8;
VT_LIST_NAMED_ATTRS = 9;
VT_LIST_DATA_TYPE = 10;
}
repeated bytes s = 2; // "list(string)"
repeated int64 i = 3; // "list(int)"
repeated float f = 4; // "list(float)"
repeated bool b = 5; // "list(bool)"
repeated bytes bt = 7;
repeated TensorDescriptor td = 8;
repeated TensorDef t = 9;
repeated GraphDef g = 10;
repeated NamedAttrs na = 11;
repeated int64 dt = 12; // list ge::DataType
ListValueType val_type = 20;
}
message ListListInt{
message ListInt{
repeated int64 list_i = 1; // list int
}
repeated ListInt list_list_i = 1; // list list int
}
oneof value
{
bytes s = 2; // "string"
int64 i = 3; // "int"
float f = 4; // "float"
bool b = 5; // "bool"
bytes bt = 7;
ListValue list = 1; // any "list(...)"
NamedAttrs func = 10; // Used to support attr nesting
TensorDescriptor td = 11; // GeTensorDesc type
TensorDef t = 12; // GeTensor type
GraphDef g = 13; // Graph type
ListListInt list_list_int = 14; // List List Int type
int64 dt = 15; // ge::DataType
}
}
// A list of attr names and their values. The whole list is attached
// with a string name. E.g., MatMul[T=float].
message NamedAttrs
{
string name = 1;
map<string, AttrDef> attr = 2;
}
// Shape / dimension description, using row-major order
message ShapeDef
{
repeated int64 dim = 1; // Size of each dimension
}
// Multidimensional data description
message TensorDescriptor
{
string name = 1; // Optional parameter, tensor name
DataType dtype = 2; // tensor datatype
ShapeDef shape = 3; // Shape / dimension
string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND"
bool has_out_attr = 9;
int64 size = 10;
int64 weight_size = 11;
bool reuse_input = 12;
bool output_tensor = 13;
string device_type = 14;
bool input_tensor =15;
int64 real_dim_cnt = 16;
int64 reuse_input_index = 17;
int64 data_offset = 18;
int64 cmps_size = 19;
string cmps_tab = 20;
int64 cmps_tab_offset = 21;
map<string, AttrDef> attr = 5; // Set of extra parameter fields
}
// GeTensor definition
message TensorDef
{
TensorDescriptor desc = 1; // Tensor description
bytes data = 2; // Tensor data
}
// Operator description
message OpDef
{
string name = 1; // name
string type = 2; // type
repeated string input = 5; // input original op name + outgoing index. op_name:index
map<string, AttrDef> attr = 10; // Set of operator parameter fields
bool has_out_attr = 20;
int64 id = 21;
int64 stream_id =22;
repeated string input_name = 23;
repeated string src_name = 24;
repeated int64 src_index = 25;
repeated string dst_name = 26;
repeated int64 dst_index = 27;
repeated int64 input_i = 28;
repeated int64 output_i = 29;
repeated int64 workspace = 30;
repeated int64 workspace_bytes = 31;
repeated bool is_input_const = 32;
repeated TensorDescriptor input_desc = 33;
repeated TensorDescriptor output_desc = 34;
repeated string subgraph_name = 35;
}
// Graph definition
message GraphDef
{
string name = 1; // name
repeated string input = 4; // Graph input
repeated string output = 5; // Graph output
repeated OpDef op = 6; // List of operators
map<string, AttrDef> attr = 11; // Extended field
}
// model definition
message ModelDef
{
string name = 1; // name
uint32 version = 2; // IR Proto version
string custom_version = 3; // User model version number, passed in by user
repeated GraphDef graph = 7; // Graph definition, graph[0] represents the main graph in the ModelDef
map<string, AttrDef> attr = 11; // Extended field
}
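
Since these ge_ir.proto copies are plain proto3, the protoc-generated C++ API applies directly. A small sketch of building a one-op model (assuming a generated header named ge_ir.pb.h):

#include "ge_ir.pb.h"

ge::proto::ModelDef BuildTinyModel() {
  ge::proto::ModelDef model;
  model.set_name("tiny_model");
  ge::proto::GraphDef *graph = model.add_graph();  // graph[0] is the main graph
  graph->set_name("main");
  ge::proto::OpDef *op = graph->add_op();
  op->set_name("data0");
  op->set_type("Data");
  ge::proto::TensorDescriptor *out = op->add_output_desc();
  out->set_dtype(ge::proto::DT_FLOAT);
  out->set_layout("NCHW");
  out->mutable_shape()->add_dim(1);    // N
  out->mutable_shape()->add_dim(3);    // C
  out->mutable_shape()->add_dim(224);  // H
  out->mutable_shape()->add_dim(224);  // W
  return model;
}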
syntax = "proto3";
package ge.proto;
enum DataType
{
DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set.
DT_FLOAT = 1; // float type
DT_FLOAT16 = 2; // fp16 type
DT_INT8 = 3; // int8 type
DT_UINT8 = 4; // uint8 type
DT_INT16 = 5; // int16 type
DT_UINT16 = 6; // uint16 type
DT_INT32 = 7; //
DT_INT64 = 8; // int64 type
DT_UINT32 = 9; // unsigned int32
DT_UINT64 = 10; // unsigned int64
DT_BOOL = 11; // bool type
DT_DOUBLE = 12; // double type
DT_STRING = 13; // string type
DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */
DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */
DT_COMPLEX64 = 16; // complex64 type
DT_COMPLEX128 = 17; // complex128 type
DT_QINT8 = 18; // qint8 type
DT_QINT16 = 19; // qint16 type
DT_QINT32 = 20; // qint32 type
DT_QUINT8 = 21; // quint8 type
DT_QUINT16 = 22; // quint16 type
DT_RESOURCE = 23; // resource type
DT_STRING_REF = 24; // string_ref type
DT_DUAL = 25; /**< dual output type */
}
message AttrDef
{
message ListValue
{
enum ListValueType{
VT_LIST_NONE = 0;
VT_LIST_STRING = 1;
VT_LIST_INT = 2;
VT_LIST_FLOAT = 3;
VT_LIST_BOOL = 4;
VT_LIST_BYTES = 5;
VT_LIST_TENSOR_DESC = 6;
VT_LIST_TENSOR = 7;
VT_LIST_GRAPH = 8;
VT_LIST_NAMED_ATTRS = 9;
VT_LIST_DATA_TYPE = 10;
}
repeated bytes s = 2; // "list(string)"
repeated int64 i = 3; // "list(int)"
repeated float f = 4; // "list(float)"
repeated bool b = 5; // "list(bool)"
repeated bytes bt = 7;
repeated TensorDescriptor td = 8;
repeated TensorDef t = 9;
repeated GraphDef g = 10;
repeated NamedAttrs na = 11;
repeated int64 dt = 12; // list ge::DataType
ListValueType val_type = 20;
}
message ListListInt{
message ListInt{
repeated int64 list_i = 1; // list int
}
repeated ListInt list_list_i = 1; // list list int
}
oneof value
{
bytes s = 2; // "string"
int64 i = 3; // "int"
float f = 4; // "float"
bool b = 5; // "bool"
bytes bt = 7;
ListValue list = 1; // any "list(...)"
NamedAttrs func = 10; // Used to support attr nesting
TensorDescriptor td = 11; // GeTensorDesc type
TensorDef t = 12; // GeTensor type
GraphDef g = 13; // Graph type
ListListInt list_list_int = 14; // List List Int type
int64 dt = 15; // ge::DataType
}
}
// A list of attr names and their values. The whole list is attached
// with a string name. E.g., MatMul[T=float].
message NamedAttrs
{
string name = 1;
map<string, AttrDef> attr = 2;
}
// Shape / dimension description, using row-major order
message ShapeDef
{
repeated int64 dim = 1; // Size of each dimension
}
// Multidimensional data description
message TensorDescriptor
{
string name = 1; // Optional parameter, tensor name
DataType dtype = 2; // tensor datatype
ShapeDef shape = 3; // Shape / dimension
string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND"
bool has_out_attr = 9;
int64 size = 10;
int64 weight_size = 11;
bool reuse_input = 12;
bool output_tensor = 13;
string device_type = 14;
bool input_tensor =15;
int64 real_dim_cnt = 16;
int64 reuse_input_index = 17;
int64 data_offset = 18;
int64 cmps_size = 19;
string cmps_tab = 20;
int64 cmps_tab_offset = 21;
map<string, AttrDef> attr = 5; // Set of extra parameter fields
}
// GeTensor definition
message TensorDef
{
TensorDescriptor desc = 1; // Tensor description
bytes data = 2; // Tensor data
}
// Operator description
message OpDef
{
string name = 1; // name
string type = 2; // type
repeated string input = 5; // input original op name + outgoing index. op_name:index
map<string, AttrDef> attr = 10; // Set of operator parameter fields
bool has_out_attr = 20;
int64 id = 21;
int64 stream_id =22;
repeated string input_name = 23;
repeated string src_name = 24;
repeated int64 src_index = 25;
repeated string dst_name = 26;
repeated int64 dst_index = 27;
repeated int64 input_i = 28;
repeated int64 output_i = 29;
repeated int64 workspace = 30;
repeated int64 workspace_bytes = 31;
repeated bool is_input_const = 32;
repeated TensorDescriptor input_desc = 33;
repeated TensorDescriptor output_desc = 34;
repeated string subgraph_name = 35;
}
// Graph definition
message GraphDef
{
string name = 1; // name
repeated string input = 4; // Graph input
repeated string output = 5; // Graph output
repeated OpDef op = 6; // List of operators
map<string, AttrDef> attr = 11; // Extended field
}
// model definition
message ModelDef
{
string name = 1; // name
uint32 version = 2; // IR Proto verion
string custom_version = 3; // User model version number, passed in by user
repeated GraphDef graph = 7; // Graph definition,graph[0] represents the main diagram in modeldef
map<string, AttrDef> attr = 11; // Extended field
}

+72 -0  parser/common/acl_graph_parser_util.h

@@ -137,6 +137,78 @@ bool ValidateStr(const std::string &filePath, const std::string &mode);
/// @return Time character string in the format: %Y%m%d%H%M%S, eg: 20171011083555
///
std::string CurrentTimeInStr();

template <typename T, typename... Args>
static inline std::shared_ptr<T> MakeShared(Args &&... args) {
typedef typename std::remove_const<T>::type T_nc;
std::shared_ptr<T> ret(new (std::nothrow) T_nc(std::forward<Args>(args)...));
return ret;
}

/// @ingroup math_util
/// @brief check whether int64 multiplication can result in overflow
/// @param [in] a multiplicator
/// @param [in] b multiplicator
/// @return Status
inline domi::Status Int64MulCheckOverflow(int64_t a, int64_t b) {
if (a > 0) {
if (b > 0) {
if (a > (INT64_MAX / b)) {
return domi::FAILED;
}
} else {
if (b < (INT64_MIN / a)) {
return domi::FAILED;
}
}
} else {
if (b > 0) {
if (a < (INT64_MIN / b)) {
return domi::FAILED;
}
} else {
if ((a != 0) && (b < (INT64_MAX / a))) {
return domi::FAILED;
}
}
}
return domi::SUCCESS;
}

/// @ingroup math_util
/// @brief check whether int64 multiplication can result in overflow
/// @param [in] a multiplicator
/// @param [in] b multiplicator
/// @return Status
inline domi::Status CheckInt64Uint32MulOverflow(int64_t a, uint32_t b) {
if (a == 0 || b == 0) {
return domi::SUCCESS;
}
if (a > 0) {
if (a > (INT64_MAX / b)) {
return domi::FAILED;
}
} else {
if (a < (INT64_MIN / b)) {
return domi::FAILED;
}
}
return domi::SUCCESS;
}

#define PARSER_INT64_MULCHECK(a, b) \
if (ge::parser::Int64MulCheckOverflow((a), (b)) != SUCCESS) { \
GELOGW("Int64 %ld and %ld multiplication can result in overflow!", static_cast<int64_t>(a), \
static_cast<int64_t>(b)); \
return INTERNAL_ERROR; \
}

#define PARSER_INT64_UINT32_MULCHECK(a, b) \
if (ge::parser::CheckInt64Uint32MulOverflow((a), (b)) != SUCCESS) { \
GELOGW("Int64 %ld and UINT32 %u multiplication can result in overflow!", static_cast<uint32_t>(a), \
static_cast<uint32_t>(b)); \
return INTERNAL_ERROR; \
}
} // namespace parser
} // namespace ge
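
Two conventions worth noting in this new header: MakeShared uses nothrow new, so allocation failure surfaces as a nullptr rather than an exception, and the overflow checks must run before the multiplication they guard. A minimal sketch combining both (the include path mirrors this diff; BigBlock is made up):

#include <cstdint>
#include "parser/common/acl_graph_parser_util.h"

struct BigBlock { int64_t payload[1024]; };

bool AllocScaled(int64_t rows, int64_t cols) {
  auto block = ge::parser::MakeShared<BigBlock>();
  if (block == nullptr) {  // nothrow new: failure is a nullptr, not a throw
    return false;
  }
  // Validate rows * cols before computing it.
  if (ge::parser::Int64MulCheckOverflow(rows, cols) != domi::SUCCESS) {
    return false;
  }
  const int64_t cells = rows * cols;
  return cells >= 0;
}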



+4 -4  parser/common/data_op_parser.cc

@@ -18,7 +18,7 @@
#include <cstdlib>
#include "common/debug/log.h"
#include "common/op/ge_op_utils.h"
#include "common/math/math_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "graph/utils/type_utils.h"
#include "omg/omg.h"
@@ -128,10 +128,10 @@ Status DataOpParser::InitNDTensor(const vector<int64_t> &shape, ge::DataType dat
}
uint32_t type_size = 0;
if (ge::TypeUtils::GetDataTypeLength(data_type, type_size)) {
FMK_INT64_UINT32_MULCHECK(size, type_size);
PARSER_INT64_UINT32_MULCHECK(size, type_size);
size *= type_size;
} else {
FMK_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
PARSER_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
size *= sizeof(float);
}
ge::TensorUtils::SetSize(tensor_desc, size);
@@ -169,7 +169,7 @@ Status DataOpParser::InitInputTensor(const vector<int64_t> &shape, ge::GeTensorD
if (input.GetShape().GetDim(0) != -1) {
size = input.GetShape().GetShapeSize();
}
FMK_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
PARSER_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
ge::TensorUtils::SetSize(input, size * sizeof(float));

return SUCCESS;
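
The swap from FMK_INT64_UINT32_MULCHECK to PARSER_INT64_UINT32_MULCHECK keeps the same guard: check the element-count-by-type-size product before scaling. A simplified sketch of the pattern (assumes a non-negative element count):

#include <cstdint>

// Returns -1 when count * type_size would overflow int64_t.
int64_t SizeInBytes(int64_t count, uint32_t type_size) {
  if (count == 0 || type_size == 0) {
    return 0;
  }
  if (count > INT64_MAX / static_cast<int64_t>(type_size)) {
    return -1;  // the case the PARSER_INT64_UINT32_MULCHECK macro rejects
  }
  return count * static_cast<int64_t>(type_size);
}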


+4 -4  parser/common/op_parser_factory.h

@@ -23,7 +23,7 @@
#include <mutex>
#include <string>
#include <vector>
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "framework/omg/parser/parser_types.h"
#include "framework/common/debug/ge_log.h"
#include "omg/omg_inner_types.h"
@@ -162,7 +162,7 @@ class CustomParserAdapterRegistrar {
*/
#define REGISTER_OP_PARSER_CREATOR(framework, op_type, clazz) \
std::shared_ptr<OpParser> Creator_##framework##_##op_type##_Op_Parser() { \
std::shared_ptr<clazz> ptr = ge::MakeShared<clazz>(); \
std::shared_ptr<clazz> ptr = ge::parser::MakeShared<clazz>(); \
if (ptr == nullptr) { \
GELOGW("MakeShared failed, result is nullptr."); \
} \
@@ -173,7 +173,7 @@ class CustomParserAdapterRegistrar {

#define REGISTER_FUSION_OP_PARSER_CREATOR(framework, op_type, clazz) \
std::shared_ptr<OpParser> Creator_##framework##_##op_type##_Fusion_Op_Parser() { \
std::shared_ptr<clazz> ptr = ge::MakeShared<clazz>(); \
std::shared_ptr<clazz> ptr = ge::parser::MakeShared<clazz>(); \
if (ptr == nullptr) { \
GELOGW("MakeShared failed, result is nullptr."); \
} \
@@ -187,7 +187,7 @@ class CustomParserAdapterRegistrar {
/// @param [in] clazz CaffeCustomParserAdapter adaptation class
#define REGISTER_CUSTOM_PARSER_ADAPTER_CREATOR(framework, clazz) \
std::shared_ptr<OpParser> Creator_##framework##_Op_Parser_Adapter() { \
std::shared_ptr<clazz> ptr = ge::MakeShared<clazz>(); \
std::shared_ptr<clazz> ptr = ge::parser::MakeShared<clazz>(); \
if (ptr == nullptr) { \
GELOGW("MakeShared failed, result is nullptr."); \
} \
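
All three registration macros above share one creator pattern: allocate the parser via the nothrow MakeShared and log a warning instead of crashing when allocation fails. Stripped of the macro machinery, that pattern looks like this (OpParserStub is a stand-in for a real parser class):

#include <cstdio>
#include <memory>
#include <new>

struct OpParserStub {};  // stand-in for a ge::OpParser subclass

std::shared_ptr<OpParserStub> CreateParser() {
  std::shared_ptr<OpParserStub> ptr(new (std::nothrow) OpParserStub());
  if (ptr == nullptr) {
    std::fprintf(stderr, "MakeShared failed, result is nullptr.\n");
  }
  return ptr;  // callers must tolerate a nullptr result
}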


+190 -190  parser/common/proto/ge_ir.proto

@@ -1,190 +1,190 @@
syntax = "proto3";
package ge.proto;
enum DataType
{
DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set.
DT_FLOAT = 1; // float type
DT_FLOAT16 = 2; // fp16 type
DT_INT8 = 3; // int8 type
DT_UINT8 = 4; // uint8 type
DT_INT16 = 5; // int16 type
DT_UINT16 = 6; // uint16 type
DT_INT32 = 7; //
DT_INT64 = 8; // int64 type
DT_UINT32 = 9; // unsigned int32
DT_UINT64 = 10; // unsigned int64
DT_BOOL = 11; // bool type
DT_DOUBLE = 12; // double type
DT_STRING = 13; // string type
DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */
DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */
DT_COMPLEX64 = 16; // complex64 type
DT_COMPLEX128 = 17; // complex128 type
DT_QINT8 = 18; // qint8 type
DT_QINT16 = 19; // qint16 type
DT_QINT32 = 20; // qint32 type
DT_QUINT8 = 21; // quint8 type
DT_QUINT16 = 22; // quint16 type
DT_RESOURCE = 23; // resource type
DT_STRING_REF = 24; // string_ref type
DT_DUAL = 25; /**< dual output type */
}
message AttrDef
{
message ListValue
{
enum ListValueType{
VT_LIST_NONE = 0;
VT_LIST_STRING = 1;
VT_LIST_INT = 2;
VT_LIST_FLOAT = 3;
VT_LIST_BOOL = 4;
VT_LIST_BYTES = 5;
VT_LIST_TENSOR_DESC = 6;
VT_LIST_TENSOR = 7;
VT_LIST_GRAPH = 8;
VT_LIST_NAMED_ATTRS = 9;
VT_LIST_DATA_TYPE = 10;
}
repeated bytes s = 2; // "list(string)"
repeated int64 i = 3; // "list(int)"
repeated float f = 4; // "list(float)"
repeated bool b = 5; // "list(bool)"
repeated bytes bt = 7;
repeated TensorDescriptor td = 8;
repeated TensorDef t = 9;
repeated GraphDef g = 10;
repeated NamedAttrs na = 11;
repeated int64 dt = 12; // list ge::DataType
ListValueType val_type = 20;
}
message ListListInt{
message ListInt{
repeated int64 list_i = 1; // list int
}
repeated ListInt list_list_i = 1; // list list int
}
oneof value
{
bytes s = 2; // "string"
int64 i = 3; // "int"
float f = 4; // "float"
bool b = 5; // "bool"
bytes bt = 7;
ListValue list = 1; // any "list(...)"
NamedAttrs func = 10; // Used to support attr nesting
TensorDescriptor td = 11; // GeTensorDesc type
TensorDef t = 12; // GeTensor type
GraphDef g = 13; // Graph type
ListListInt list_list_int = 14; // List List Int type
int64 dt = 15; // ge::DataType
}
}
// A list of attr names and their values. The whole list is attached
// with a string name. E.g., MatMul[T=float].
message NamedAttrs
{
string name = 1;
map<string, AttrDef> attr = 2;
}
// Shape / dimension description, using row-major order
message ShapeDef
{
repeated int64 dim = 1; // Size of each dimension
}
// Multidimensional data description
message TensorDescriptor
{
string name = 1; // Optional parameter, tensor name
DataType dtype = 2; // tensor datatype
ShapeDef shape = 3; // Shape / dimension
string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND"
bool has_out_attr = 9;
int64 size = 10;
int64 weight_size = 11;
bool reuse_input = 12;
bool output_tensor = 13;
string device_type = 14;
bool input_tensor =15;
int64 real_dim_cnt = 16;
int64 reuse_input_index = 17;
int64 data_offset = 18;
int64 cmps_size = 19;
string cmps_tab = 20;
int64 cmps_tab_offset = 21;
map<string, AttrDef> attr = 5; // Set of extra parameter fields
}
// GeTensor definition
message TensorDef
{
TensorDescriptor desc = 1; // Tensor description
bytes data = 2; // Tensor data
}
// Operator description
message OpDef
{
string name = 1; // name
string type = 2; // type
repeated string input = 5; // input original op name + outgoing index. op_name:index
map<string, AttrDef> attr = 10; // Set of operator parameter fields
bool has_out_attr = 20;
int64 id = 21;
int64 stream_id =22;
repeated string input_name = 23;
repeated string src_name = 24;
repeated int64 src_index = 25;
repeated string dst_name = 26;
repeated int64 dst_index = 27;
repeated int64 input_i = 28;
repeated int64 output_i = 29;
repeated int64 workspace = 30;
repeated int64 workspace_bytes = 31;
repeated bool is_input_const = 32;
repeated TensorDescriptor input_desc = 33;
repeated TensorDescriptor output_desc = 34;
repeated string subgraph_name = 35;
}
// Graph definition
message GraphDef
{
string name = 1; // name
repeated string input = 4; // Graph input
repeated string output = 5; // Graph output
repeated OpDef op = 6; // List of operators
map<string, AttrDef> attr = 11; // Extended field
}
// model definition
message ModelDef
{
string name = 1; // name
uint32 version = 2; // IR Proto version
string custom_version = 3; // User model version number, passed in by user
repeated GraphDef graph = 7; // Graph definition, graph[0] represents the main graph in the ModelDef
map<string, AttrDef> attr = 11; // Extended field
}
syntax = "proto3";
package ge.proto;
enum DataType
{
DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set.
DT_FLOAT = 1; // float type
DT_FLOAT16 = 2; // fp16 type
DT_INT8 = 3; // int8 type
DT_UINT8 = 4; // uint8 type
DT_INT16 = 5; // int16 type
DT_UINT16 = 6; // uint16 type
DT_INT32 = 7; //
DT_INT64 = 8; // int64 type
DT_UINT32 = 9; // unsigned int32
DT_UINT64 = 10; // unsigned int64
DT_BOOL = 11; // bool type
DT_DOUBLE = 12; // double type
DT_STRING = 13; // string type
DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */
DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */
DT_COMPLEX64 = 16; // complex64 type
DT_COMPLEX128 = 17; // complex128 type
DT_QINT8 = 18; // qint8 type
DT_QINT16 = 19; // qint16 type
DT_QINT32 = 20; // qint32 type
DT_QUINT8 = 21; // quint8 type
DT_QUINT16 = 22; // quint16 type
DT_RESOURCE = 23; // resource type
DT_STRING_REF = 24; // string_ref type
DT_DUAL = 25; /**< dual output type */
}
message AttrDef
{
message ListValue
{
enum ListValueType{
VT_LIST_NONE = 0;
VT_LIST_STRING = 1;
VT_LIST_INT = 2;
VT_LIST_FLOAT = 3;
VT_LIST_BOOL = 4;
VT_LIST_BYTES = 5;
VT_LIST_TENSOR_DESC = 6;
VT_LIST_TENSOR = 7;
VT_LIST_GRAPH = 8;
VT_LIST_NAMED_ATTRS = 9;
VT_LIST_DATA_TYPE = 10;
}
repeated bytes s = 2; // "list(string)"
repeated int64 i = 3; // "list(int)"
repeated float f = 4; // "list(float)"
repeated bool b = 5; // "list(bool)"
repeated bytes bt = 7;
repeated TensorDescriptor td = 8;
repeated TensorDef t = 9;
repeated GraphDef g = 10;
repeated NamedAttrs na = 11;
repeated int64 dt = 12; // list ge::DataType
ListValueType val_type = 20;
}
message ListListInt{
message ListInt{
repeated int64 list_i = 1; // list int
}
repeated ListInt list_list_i = 1; // list list int
}
oneof value
{
bytes s = 2; // "string"
int64 i = 3; // "int"
float f = 4; // "float"
bool b = 5; // "bool"
bytes bt = 7;
ListValue list = 1; // any "list(...)"
NamedAttrs func = 10; // Used to support attr nesting
TensorDescriptor td = 11; // GeTensorDesc type
TensorDef t = 12; // GeTensor type
GraphDef g = 13; // Graph type
ListListInt list_list_int = 14; // List List Int type
int64 dt = 15; // ge::DataType
}
}
// A list of attr names and their values. The whole list is attached
// with a string name. E.g., MatMul[T=float].
message NamedAttrs
{
string name = 1;
map<string, AttrDef> attr = 2;
}
// Shape / dimension description, using row-major order
message ShapeDef
{
repeated int64 dim = 1; // Size of each dimension
}
// Multidimensional data description
message TensorDescriptor
{
string name = 1; // Optional parameter, tensor name
DataType dtype = 2; // tensor datatype
ShapeDef shape = 3; // Shape / dimension
string layout = 4; // Tensor format, eg: "NCHW", "NHWC", "CHW", "ND"
bool has_out_attr = 9;
int64 size = 10;
int64 weight_size = 11;
bool reuse_input = 12;
bool output_tensor = 13;
string device_type = 14;
bool input_tensor =15;
int64 real_dim_cnt = 16;
int64 reuse_input_index = 17;
int64 data_offset = 18;
int64 cmps_size = 19;
string cmps_tab = 20;
int64 cmps_tab_offset = 21;
map<string, AttrDef> attr = 5; // Set of extra parameter fields
}
// GeTensor definition
message TensorDef
{
TensorDescriptor desc = 1; // Tensor description
bytes data = 2; // Tensor data
}
// Operator description
message OpDef
{
string name = 1; // name
string type = 2; // type
repeated string input = 5; // input original op name + outgoing index. op_name:index
map<string, AttrDef> attr = 10; // Set of operator parameter fields
bool has_out_attr = 20;
int64 id = 21;
int64 stream_id =22;
repeated string input_name = 23;
repeated string src_name = 24;
repeated int64 src_index = 25;
repeated string dst_name = 26;
repeated int64 dst_index = 27;
repeated int64 input_i = 28;
repeated int64 output_i = 29;
repeated int64 workspace = 30;
repeated int64 workspace_bytes = 31;
repeated bool is_input_const = 32;
repeated TensorDescriptor input_desc = 33;
repeated TensorDescriptor output_desc = 34;
repeated string subgraph_name = 35;
}
// Graph definition
message GraphDef
{
string name = 1; // name
repeated string input = 4; // Graph input
repeated string output = 5; // Graph output
repeated OpDef op = 6; // List of operators
map<string, AttrDef> attr = 11; // Extended field
}
// model definition
message ModelDef
{
string name = 1; // name
uint32 version = 2; // IR Proto verion
string custom_version = 3; // User model version number, passed in by user
repeated GraphDef graph = 7; // Graph definition,graph[0] represents the main diagram in modeldef
map<string, AttrDef> attr = 11; // Extended field
}

+136 -136  parser/common/proto/insert_op.proto

@@ -1,136 +1,136 @@
syntax = "proto3";
package domi;
message InsertNewOps {
repeated AippOpParams aipp_op = 1;
repeated MultiShapeOpParams multi_shape_op = 2;
}
message AippOpParams {
enum InputFormat {
UNDEFINED = 0;
YUV420SP_U8 = 1;
XRGB8888_U8 = 2;
RGB888_U8 = 3;
YUV400_U8 = 4;
NC1HWC0DI_FP16 = 5;
NC1HWC0DI_S8 = 6;
ARGB8888_U8 = 7;
YUYV_U8 = 8;
YUV422SP_U8 = 9;
AYUV444_U8 = 10;
RAW10 = 11;
RAW12 = 12;
RAW16 = 13;
RAW24 = 14;
RGB16 = 15;
RGB20 = 16;
RGB24 = 17;
RGB8_IR = 18;
RGB16_IR = 19;
RGB24_IR = 20;
}
enum AippMode {
undefined = 0;
static = 1;
dynamic = 2;
}
// AIPP mode: distinguishes static AIPP from dynamic AIPP
AippMode aipp_mode = 1;
// related_input_rank is required; integer; valid range: >= 0 and <= the number of input Data operators; default 0.
// It selects which model input AIPP is applied to. For example, if the model has two inputs and AIPP should process the second one, set related_input_rank to 1.
uint32 related_input_rank = 2;
// input_edge_idx is optional; integer; valid range: >= 0.
// It applies different AIPP processing to different outputs of the Data operator. If unset, AIPP is applied to all output edges of the model input selected by related_input_rank.
// The configured value must be <= the number of output edges of the Data operator.
repeated uint32 input_edge_idx = 3;
// [Begin] Dynamic AIPP parameters; ignored when static AIPP is configured
uint32 max_src_image_size = 4;
// Whether rotation is supported. Off by default; enabling rotation incurs extra memory and performance cost.
bool support_rotation = 5;
// [End] Dynamic AIPP parameters
// [Begin] Static AIPP parameters; ignored when dynamic AIPP is configured
InputFormat input_format = 51;
bool csc_switch = 52;
float cpadding_value = 53;
bool rbuv_swap_switch = 54;
bool ax_swap_switch = 55;
bool single_line_mode = 56;
int32 src_image_size_w = 57;
int32 src_image_size_h = 58;
bool crop = 59;
int32 load_start_pos_w = 60;
int32 load_start_pos_h = 61;
int32 crop_size_w = 62;
int32 crop_size_h = 63;
bool resize = 64;
int32 resize_output_w = 65;
int32 resize_output_h = 66;
bool padding = 67;
int32 left_padding_size = 68;
int32 right_padding_size = 69;
int32 top_padding_size = 70;
int32 bottom_padding_size = 71;
int32 mean_chn_0 = 10;
int32 mean_chn_1 = 11;
int32 mean_chn_2 = 12;
int32 mean_chn_3 = 19;
float min_chn_0 = 13;
float min_chn_1 = 14;
float min_chn_2 = 15;
float min_chn_3 = 20;
repeated float var_reci_chn_0 = 16;
repeated float var_reci_chn_1 = 17;
repeated float var_reci_chn_2 = 18;
repeated float var_reci_chn_3 = 21;
repeated int32 matrix_r0c0 = 30;
repeated int32 matrix_r0c1 = 31;
repeated int32 matrix_r0c2 = 32;
repeated int32 matrix_r1c0 = 33;
repeated int32 matrix_r1c1 = 34;
repeated int32 matrix_r1c2 = 35;
repeated int32 matrix_r2c0 = 36;
repeated int32 matrix_r2c1 = 37;
repeated int32 matrix_r2c2 = 38;
repeated int32 output_bias_0 = 39;
repeated int32 output_bias_1 = 40;
repeated int32 output_bias_2 = 41;
repeated int32 input_bias_0 = 42;
repeated int32 input_bias_1 = 43;
repeated int32 input_bias_2 = 44;
// [End] Static AIPP parameters
// The n number that is used for raw/rgbir data into f16 transformation.
// The transformation equation is x/(2^n). If set to 0, no transform is performed.
uint32 raw_rgbir_to_f16_n = 45;
}
message MultiShapeOpParams {
enum MultiShapeMode {
batch = 0; // dynamic batch
resolution = 1; // dynamic resolution, reserved for extension
}
MultiShapeMode mode = 1; // operator mode
uint32 related_input_rank = 2; // which input the new operator is inserted at
repeated uint32 batch_list = 11; // batch_list values; the number of entries must be between 2 and 8
}
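
These AIPP parameters are likewise consumed through protoc-generated classes. A hedged sketch of filling in part of a static AIPP configuration in C++ (header name insert_op.pb.h assumed; values illustrative):

#include "insert_op.pb.h"

domi::AippOpParams MakeStaticAipp() {
  domi::AippOpParams params;
  params.set_related_input_rank(0);                        // apply AIPP to input 0
  params.set_input_format(domi::AippOpParams::RGB888_U8);  // source image format
  params.set_src_image_size_w(224);
  params.set_src_image_size_h(224);
  params.set_csc_switch(true);            // enable color-space conversion
  params.add_var_reci_chn_0(1.0f / 255);  // per-channel normalization factor
  return params;
}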
syntax = "proto3";
package domi;
message InsertNewOps {
repeated AippOpParams aipp_op = 1;
repeated MultiShapeOpParams multi_shape_op = 2;
}
message AippOpParams {
enum InputFormat {
UNDEFINED = 0;
YUV420SP_U8 = 1;
XRGB8888_U8 = 2;
RGB888_U8 = 3;
YUV400_U8 = 4;
NC1HWC0DI_FP16 = 5;
NC1HWC0DI_S8 = 6;
ARGB8888_U8 = 7;
YUYV_U8 = 8;
YUV422SP_U8 = 9;
AYUV444_U8 = 10;
RAW10 = 11;
RAW12 = 12;
RAW16 = 13;
RAW24 = 14;
RGB16 = 15;
RGB20 = 16;
RGB24 = 17;
RGB8_IR = 18;
RGB16_IR = 19;
RGB24_IR = 20;
}
enum AippMode {
undefined = 0;
static = 1;
dynamic = 2;
}
// AIPP模式,区分静态AIPP和动态AIPP
AippMode aipp_mode = 1;
// related_input_rank参数为必填,类型为整型,配置范围>=0, <=输入Data算子的个数,默认值为0。
// 标识对模型的第几个输入做AIPP处理,例如模型有两个输入,需要对第2个输入做AIPP,则配置related_input_rank为1。
uint32 related_input_rank = 2;
// input_edge_idx参数为可选,类型为整型,配置范围为>=0。
// 配置该参数的作用,在于对Data算子不同的输出做不同的AIPP处理,如果该参数没有配置,默认对related_input_rank指定的模型输入的所有输出边做AIPP。
// 配置值 <= Data算子输出边的个数。
repeated uint32 input_edge_idx = 3;
// [Begin] 动态AIPP参数,配置静态AIPP时无效
uint32 max_src_image_size = 4;
// 是否支持旋转。默认不支持,开启支持旋转时,会有额外的空间和性能损失
bool support_rotation = 5;
// [End] 动态AIPP参数
// [Begin] 静态AIPP参数,配置动态AIPP时无效
InputFormat input_format = 51;
bool csc_switch = 52;
float cpadding_value = 53;
bool rbuv_swap_switch = 54;
bool ax_swap_switch = 55;
bool single_line_mode = 56;
int32 src_image_size_w = 57;
int32 src_image_size_h = 58;
bool crop = 59;
int32 load_start_pos_w = 60;
int32 load_start_pos_h = 61;
int32 crop_size_w = 62;
int32 crop_size_h = 63;
bool resize = 64;
int32 resize_output_w = 65;
int32 resize_output_h = 66;
bool padding = 67;
int32 left_padding_size = 68;
int32 right_padding_size = 69;
int32 top_padding_size = 70;
int32 bottom_padding_size = 71;
int32 mean_chn_0 = 10;
int32 mean_chn_1 = 11;
int32 mean_chn_2 = 12;
int32 mean_chn_3 = 19;
float min_chn_0 = 13;
float min_chn_1 = 14;
float min_chn_2 = 15;
float min_chn_3 = 20;
repeated float var_reci_chn_0 = 16;
repeated float var_reci_chn_1 = 17;
repeated float var_reci_chn_2 = 18;
repeated float var_reci_chn_3 = 21;
repeated int32 matrix_r0c0 = 30;
repeated int32 matrix_r0c1 = 31;
repeated int32 matrix_r0c2 = 32;
repeated int32 matrix_r1c0 = 33;
repeated int32 matrix_r1c1 = 34;
repeated int32 matrix_r1c2 = 35;
repeated int32 matrix_r2c0 = 36;
repeated int32 matrix_r2c1 = 37;
repeated int32 matrix_r2c2 = 38;
repeated int32 output_bias_0 = 39;
repeated int32 output_bias_1 = 40;
repeated int32 output_bias_2 = 41;
repeated int32 input_bias_0 = 42;
repeated int32 input_bias_1 = 43;
repeated int32 input_bias_2 = 44;
// [End] 静态AIPP参数
// The n number that is used for raw/rgbir data into f16 transformation.
// The transformation equation is x/(2^n). If set to 0, no transform is performed.
uint32 raw_rgbir_to_f16_n = 45;
}
message MultiShapeOpParams {
enum MultiShapeMode {
batch = 0; //动态batch
resolution = 1; //动态分辨率,扩展用
}
MultiShapeMode mode = 1; //算子模式
uint32 related_input_rank = 2; //新增算子插入到哪个输入
repeated uint32 batch_list = 11; //batch_list值,batch_list的个数是2到8之间
}

+3 -3  parser/common/register_tbe.cc

@@ -19,7 +19,7 @@
#include <memory>
#include <string>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op/ge_op_utils.h"
#include "common/op_map.h"
#include "common/util.h"
@@ -79,7 +79,7 @@ bool OpRegistrationTbe::RegisterParser(const OpRegistrationData &reg_data) {
return false;
}
std::shared_ptr<TensorFlowCustomParserAdapter> tf_parser_adapter =
ge::MakeShared<TensorFlowCustomParserAdapter>();
ge::parser::MakeShared<TensorFlowCustomParserAdapter>();
if (tf_parser_adapter == nullptr) {
GELOGE(PARAM_INVALID, "Create tf parser adapter failed.");
return false;
@@ -95,7 +95,7 @@ bool OpRegistrationTbe::RegisterParser(const OpRegistrationData &reg_data) {
}
GELOGI("Register fusion custom op parser: %s", reg_data.GetOmOptype().c_str());
std::shared_ptr<TensorFlowFusionCustomParserAdapter> tf_fusion_parser_adapter =
ge::MakeShared<TensorFlowFusionCustomParserAdapter>();
ge::parser::MakeShared<TensorFlowFusionCustomParserAdapter>();
if (tf_fusion_parser_adapter == nullptr) {
GELOGE(PARAM_INVALID, "Create tf fusion parser adapter failed.");
return false;


+2 -2  parser/common/thread_pool.h

@@ -32,7 +32,7 @@
#include "framework/common/ge_inner_error_codes.h"
#include "external/ge/ge_api_error_codes.h"
#include "graph/types.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"

namespace ge {
using ThreadTask = std::function<void()>;
@@ -53,7 +53,7 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY ThreadPool {
}

auto bindFunc = std::bind(std::forward<Func>(func), std::forward<Args>(args)...);
auto task = ge::MakeShared<std::packaged_task<retType()>>(bindFunc);
auto task = ge::parser::MakeShared<std::packaged_task<retType()>>(bindFunc);
if (task == nullptr) {
GELOGE(ge::FAILED, "Make shared failed.");
return fail_future;
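
The thread_pool.h change only swaps the allocator, but the surrounding pattern is worth spelling out: each submitted callable becomes a std::packaged_task whose future is handed back to the caller. A self-contained sketch of that pattern in standard C++ (Submit is a made-up name; a real pool would enqueue rather than run inline):

#include <functional>
#include <future>
#include <memory>
#include <new>

template <typename Func, typename... Args>
auto Submit(Func &&func, Args &&... args) -> std::future<decltype(func(args...))> {
  using RetType = decltype(func(args...));
  auto bound = std::bind(std::forward<Func>(func), std::forward<Args>(args)...);
  // Mirror the nothrow MakeShared: allocation failure yields a null task.
  std::shared_ptr<std::packaged_task<RetType()>> task(
      new (std::nothrow) std::packaged_task<RetType()>(bound));
  if (task == nullptr) {
    return std::future<RetType>();  // invalid future signals failure
  }
  std::future<RetType> result = task->get_future();
  (*task)();  // a real pool would push this onto a worker queue instead
  return result;
}
// Usage: auto f = Submit([](int x) { return x * 2; }, 21);  // f.get() == 42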


+16 -16  parser/func_to_graph/proto_python_rule.mk

@@ -1,17 +1,17 @@
include $(BUILD_SYSTEM)/base_rules.mk
FUNCTION_TO_GRAPH_OUT_TIMESTAMP := $(HOST_OUT_ROOT)/func_to_graph/.timestamp
PROTO_SRC_DIR = framework/domi/parser/func_to_graph/proto
PY_PROTO_BUILD_DIR = $(HOST_OUT_ROOT)/tmp/function_to_graph/proto
$(warning PRIVATE_PROTOC is $(PRIVATE_PROTOC))
$(warning protobuf_lib_dir is $(protobuf_lib_dir))
$(FUNCTION_TO_GRAPH_OUT_TIMESTAMP): $(PRIVATE_PROTOC)
mkdir -p $(PY_PROTO_BUILD_DIR)
LD_LIBRARY_PATH=$(protobuf_lib_dir):$$LD_LIBRARY_PATH $(PRIVATE_PROTOC) -I=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $(PROTO_SRC_DIR)/*.proto
$(LOCAL_BUILT_MODULE): $(FUNCTION_TO_GRAPH_OUT_TIMESTAMP)
mkdir -p $@
cp -rf $(PY_PROTO_BUILD_DIR)/* $@

+ 0
- 1
parser/onnx/module.mk View File

@@ -43,7 +43,6 @@ LOCAL_SHARED_LIBRARIES := \
libparser_common \
libgraph \
libregister \
libge_common \

LOCAL_LDFLAGS := -lrt



+ 1
- 1
parser/onnx/onnx_constant_parser.cc View File

@@ -17,7 +17,7 @@
#include "onnx_constant_parser.h"
#include <map>
#include <vector>
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "framework/omg/parser/parser_inner_ctx.h"
#include "graph/ge_tensor.h"


+ 190
- 190
parser/proto/ge_ir.proto View File

@@ -1,190 +1,190 @@
syntax = "proto3";
package ge.proto;
enum DataType
{
DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set.
DT_FLOAT = 1; // float type
DT_FLOAT16 = 2; // fp16 type
DT_INT8 = 3; // int8 type
DT_UINT8 = 4; // uint8 type
DT_INT16 = 5; // int16 type
DT_UINT16 = 6; // uint16 type
DT_INT32 = 7; // int32 type
DT_INT64 = 8; // int64 type
DT_UINT32 = 9; // unsigned int32
DT_UINT64 = 10; // unsigned int64
DT_BOOL = 11; // bool type
DT_DOUBLE = 12; // double type
DT_STRING = 13; // string type
DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */
DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */
DT_COMPLEX64 = 16; // complex64 type
DT_COMPLEX128 = 17; // complex128 type
DT_QINT8 = 18; // qint8 type
DT_QINT16 = 19; // qint16 type
DT_QINT32 = 20; // qint32 type
DT_QUINT8 = 21; // quint8 type
DT_QUINT16 = 22; // quint16 type
DT_RESOURCE = 23; // resource type
DT_STRING_REF = 24; // string_ref type
DT_DUAL = 25; /**< dual output type */
}
message AttrDef
{
message ListValue
{
enum ListValueType{
VT_LIST_NONE = 0;
VT_LIST_STRING = 1;
VT_LIST_INT = 2;
VT_LIST_FLOAT = 3;
VT_LIST_BOOL = 4;
VT_LIST_BYTES = 5;
VT_LIST_TENSOR_DESC = 6;
VT_LIST_TENSOR = 7;
VT_LIST_GRAPH = 8;
VT_LIST_NAMED_ATTRS = 9;
VT_LIST_DATA_TYPE = 10;
}
repeated bytes s = 2; // "list(string)"
repeated int64 i = 3; // "list(int)"
repeated float f = 4; // "list(float)"
repeated bool b = 5; // "list(bool)"
repeated bytes bt = 7;
repeated TensorDescriptor td = 8;
repeated TensorDef t = 9;
repeated GraphDef g = 10;
repeated NamedAttrs na = 11;
repeated int64 dt = 12; // list ge::DataType
ListValueType val_type = 20;
}
message ListListInt{
message ListInt{
repeated int64 list_i = 1; // list int
}
repeated ListInt list_list_i = 1; // list list int
}
oneof value
{
bytes s = 2; // "string"
int64 i = 3; // "int"
float f = 4; // "float"
bool b = 5; // "bool"
bytes bt = 7;
ListValue list = 1; // any "list(...)"
NamedAttrs func = 10; // Used to support attr nesting
TensorDescriptor td = 11; // GeTensorDesc type
TensorDef t = 12; // GeTensor type
GraphDef g = 13; // Graph type
ListListInt list_list_int = 14; // List List Int type
int64 dt = 15; // ge::DataType
}
}
// A list of attr names and their values. The whole list is attached
// with a string name. E.g., MatMul[T=float].
message NamedAttrs
{
string name = 1;
map<string, AttrDef> attr = 2;
}
// Shape / dimension description, using row-major order
message ShapeDef
{
repeated int64 dim = 1; // Size of each dimension
}
// Multidimensional data description
message TensorDescriptor
{
string name = 1; // Optional parameter, tensor name
DataType dtype = 2; // tensor datatype
ShapeDef shape = 3; // Shape / dimension
string layout = 4; // Tensor format, e.g. "NCHW", "NHWC", "CHW", "ND"
bool has_out_attr = 9;
int64 size = 10;
int64 weight_size = 11;
bool reuse_input = 12;
bool output_tensor = 13;
string device_type = 14;
bool input_tensor = 15;
int64 real_dim_cnt = 16;
int64 reuse_input_index = 17;
int64 data_offset = 18;
int64 cmps_size = 19;
string cmps_tab = 20;
int64 cmps_tab_offset = 21;
map<string, AttrDef> attr = 5; // Set of extra parameter fields
}
// GeTensor definition
message TensorDef
{
TensorDescriptor desc = 1; // Tensor description
bytes data = 2; // Tensor data
}
// Operator description
message OpDef
{
string name = 1; // name
string type = 2; // type
repeated string input = 5; // input original op name + outgoing index. op_name:index
map<string, AttrDef> attr = 10; // Set of operator parameter fields
bool has_out_attr = 20;
int64 id = 21;
int64 stream_id = 22;
repeated string input_name = 23;
repeated string src_name = 24;
repeated int64 src_index = 25;
repeated string dst_name = 26;
repeated int64 dst_index = 27;
repeated int64 input_i = 28;
repeated int64 output_i = 29;
repeated int64 workspace = 30;
repeated int64 workspace_bytes = 31;
repeated bool is_input_const = 32;
repeated TensorDescriptor input_desc = 33;
repeated TensorDescriptor output_desc = 34;
repeated string subgraph_name = 35;
}
// Graph definition
message GraphDef
{
string name = 1; // name
repeated string input = 4; // Graph input
repeated string output = 5; // Graph output
repeated OpDef op = 6; // List of operators
map<string, AttrDef> attr = 11; // Extended field
}
// model definition
message ModelDef
{
string name = 1; // name
uint32 version = 2; // IR proto version
string custom_version = 3; // User model version number, passed in by the user
repeated GraphDef graph = 7; // Graph definition; graph[0] is the main graph of the model
map<string, AttrDef> attr = 11; // Extended field
}
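Because this IR is plain proto3, the messages can be filled with the generated protobuf API. A minimal sketch, assuming the protoc output of this file; the header name ge_ir.pb.h plus the op and attribute names are illustrative:

#include "ge_ir.pb.h"  // assumed name of the protoc-generated header

void BuildTinyModel(ge::proto::ModelDef &model) {
  model.set_name("demo_model");
  ge::proto::GraphDef *graph = model.add_graph();  // graph[0]: the main graph
  graph->set_name("main");
  ge::proto::OpDef *op = graph->add_op();
  op->set_name("matmul_0");
  op->set_type("MatMul");
  op->add_input("data_0:0");  // "op_name:index", per the comment on OpDef.input
  // attr is a map<string, AttrDef>; store one bool attribute.
  ge::proto::AttrDef attr;
  attr.set_b(false);
  (*op->mutable_attr())["transpose_x1"] = attr;
}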

+ 136
- 136
parser/proto/insert_op.proto View File

@@ -1,136 +1,136 @@
syntax = "proto3";
package domi;
message InsertNewOps {
repeated AippOpParams aipp_op = 1;
repeated MultiShapeOpParams multi_shape_op = 2;
}
message AippOpParams {
enum InputFormat {
UNDEFINED = 0;
YUV420SP_U8 = 1;
XRGB8888_U8 = 2;
RGB888_U8 = 3;
YUV400_U8 = 4;
NC1HWC0DI_FP16 = 5;
NC1HWC0DI_S8 = 6;
ARGB8888_U8 = 7;
YUYV_U8 = 8;
YUV422SP_U8 = 9;
AYUV444_U8 = 10;
RAW10 = 11;
RAW12 = 12;
RAW16 = 13;
RAW24 = 14;
RGB16 = 15;
RGB20 = 16;
RGB24 = 17;
RGB8_IR = 18;
RGB16_IR = 19;
RGB24_IR = 20;
}
enum AippMode {
undefined = 0;
static = 1;
dynamic = 2;
}
// AIPP mode: distinguishes static AIPP from dynamic AIPP
AippMode aipp_mode = 1;
// related_input_rank is required; integer, valid range >= 0 and <= the number of input Data ops, default 0.
// It selects which model input gets AIPP, e.g. for a model with two inputs, set related_input_rank to 1 to apply AIPP to the second input.
uint32 related_input_rank = 2;
// input_edge_idx is optional; integer, valid range >= 0.
// It selects which output edges of the Data op get AIPP; if unset, AIPP is applied to all output edges of the model input chosen by related_input_rank.
// Each value must be <= the number of output edges of the Data op.
repeated uint32 input_edge_idx = 3;
// [Begin] Dynamic AIPP parameters; ignored when static AIPP is configured
uint32 max_src_image_size = 4;
// Whether rotation is supported. Off by default; enabling it costs extra memory and performance
bool support_rotation = 5;
// [End] Dynamic AIPP parameters
// [Begin] Static AIPP parameters; ignored when dynamic AIPP is configured
InputFormat input_format = 51;
bool csc_switch = 52;
float cpadding_value = 53;
bool rbuv_swap_switch = 54;
bool ax_swap_switch = 55;
bool single_line_mode = 56;
int32 src_image_size_w = 57;
int32 src_image_size_h = 58;
bool crop = 59;
int32 load_start_pos_w = 60;
int32 load_start_pos_h = 61;
int32 crop_size_w = 62;
int32 crop_size_h = 63;
bool resize = 64;
int32 resize_output_w = 65;
int32 resize_output_h = 66;
bool padding = 67;
int32 left_padding_size = 68;
int32 right_padding_size = 69;
int32 top_padding_size = 70;
int32 bottom_padding_size = 71;
int32 mean_chn_0 = 10;
int32 mean_chn_1 = 11;
int32 mean_chn_2 = 12;
int32 mean_chn_3 = 19;
float min_chn_0 = 13;
float min_chn_1 = 14;
float min_chn_2 = 15;
float min_chn_3 = 20;
repeated float var_reci_chn_0 = 16;
repeated float var_reci_chn_1 = 17;
repeated float var_reci_chn_2 = 18;
repeated float var_reci_chn_3 = 21;
repeated int32 matrix_r0c0 = 30;
repeated int32 matrix_r0c1 = 31;
repeated int32 matrix_r0c2 = 32;
repeated int32 matrix_r1c0 = 33;
repeated int32 matrix_r1c1 = 34;
repeated int32 matrix_r1c2 = 35;
repeated int32 matrix_r2c0 = 36;
repeated int32 matrix_r2c1 = 37;
repeated int32 matrix_r2c2 = 38;
repeated int32 output_bias_0 = 39;
repeated int32 output_bias_1 = 40;
repeated int32 output_bias_2 = 41;
repeated int32 input_bias_0 = 42;
repeated int32 input_bias_1 = 43;
repeated int32 input_bias_2 = 44;
// [End] Static AIPP parameters
// The n number that is used for raw/rgbir data into f16 transformation.
// The transformation equation is x/(2^n). If set to 0, no transform is performed.
uint32 raw_rgbir_to_f16_n = 45;
}
message MultiShapeOpParams {
enum MultiShapeMode {
batch = 0; // dynamic batch
resolution = 1; // dynamic resolution, reserved for extension
}
MultiShapeMode mode = 1; // operator mode
uint32 related_input_rank = 2; // which model input the new op is inserted on
repeated uint32 batch_list = 11; // batch_list values; the entry count must be between 2 and 8
}

+ 2
- 2
parser/tensorflow/graph_optimizer.cc View File

@@ -23,7 +23,7 @@
#include "cce/cce.h"
#include "cce/dnn.h"
#include "common/debug/log.h"
#include "common/math/math_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op/ge_op_utils.h"
#include "common/op_map.h"
#include "common/types_map.h"
@@ -807,7 +807,7 @@ Status CreateNodeDefBytes(ge::NodePtr n, string originalType, map<string, PIOLis
for (uint32_t j = 0; j < ge_desc->GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc->GetShape().GetDim(j);
GE_CHECK_GE(tmp_dim, 0);
FMK_INT64_MULCHECK(real_size, tmp_dim);
PARSER_INT64_MULCHECK(real_size, tmp_dim);
real_size *= tmp_dim;
}
ge::TensorUtils::SetSize(*ge_desc, real_size * size_type);
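PARSER_INT64_MULCHECK itself is not shown in this diff; given the FMK_INT64_MULCHECK it replaces and the GE_CHECK_GE(tmp_dim, 0) guard just above, it is presumably a pre-multiplication overflow check along these lines (a sketch under that assumption, not the real definition):

#include <cstdint>
#include <limits>

// Bails out of the enclosing Status-returning function if a * b would
// overflow int64_t; operands are assumed non-negative, which the
// GE_CHECK_GE(tmp_dim, 0) above guarantees for the dims.
#define PARSER_INT64_MULCHECK_SKETCH(a, b)                              \
  do {                                                                  \
    if ((b) != 0 && (a) > std::numeric_limits<int64_t>::max() / (b)) {  \
      GELOGE(INTERNAL_ERROR, "int64 multiplication overflow");         \
      return INTERNAL_ERROR;                                            \
    }                                                                   \
  } while (false)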


+ 190
- 190
parser/tensorflow/proto/ge_ir.proto View File

@@ -1,190 +1,190 @@
syntax = "proto3";
package ge.proto;
enum DataType
{
DT_UNDEFINED = 0; // Used to indicate a DataType field has not been set.
DT_FLOAT = 1; // float type
DT_FLOAT16 = 2; // fp16 type
DT_INT8 = 3; // int8 type
DT_UINT8 = 4; // uint8 type
DT_INT16 = 5; // int16 type
DT_UINT16 = 6; // uint16 type
DT_INT32 = 7; // int32 type
DT_INT64 = 8; // int64 type
DT_UINT32 = 9; // unsigned int32
DT_UINT64 = 10; // unsigned int64
DT_BOOL = 11; // bool type
DT_DOUBLE = 12; // double type
DT_STRING = 13; // string type
DT_DUAL_SUB_INT8 = 14; /**< dual output int8 type */
DT_DUAL_SUB_UINT8 = 15; /**< dual output uint8 type */
DT_COMPLEX64 = 16; // complex64 type
DT_COMPLEX128 = 17; // complex128 type
DT_QINT8 = 18; // qint8 type
DT_QINT16 = 19; // qint16 type
DT_QINT32 = 20; // qint32 type
DT_QUINT8 = 21; // quint8 type
DT_QUINT16 = 22; // quint16 type
DT_RESOURCE = 23; // resource type
DT_STRING_REF = 24; // string_ref type
DT_DUAL = 25; /**< dual output type */
}
message AttrDef
{
message ListValue
{
enum ListValueType{
VT_LIST_NONE = 0;
VT_LIST_STRING = 1;
VT_LIST_INT = 2;
VT_LIST_FLOAT = 3;
VT_LIST_BOOL = 4;
VT_LIST_BYTES = 5;
VT_LIST_TENSOR_DESC = 6;
VT_LIST_TENSOR = 7;
VT_LIST_GRAPH = 8;
VT_LIST_NAMED_ATTRS = 9;
VT_LIST_DATA_TYPE = 10;
}
repeated bytes s = 2; // "list(string)"
repeated int64 i = 3; // "list(int)"
repeated float f = 4; // "list(float)"
repeated bool b = 5; // "list(bool)"
repeated bytes bt = 7;
repeated TensorDescriptor td = 8;
repeated TensorDef t = 9;
repeated GraphDef g = 10;
repeated NamedAttrs na = 11;
repeated int64 dt = 12; // list ge::DataType
ListValueType val_type = 20;
}
message ListListInt{
message ListInt{
repeated int64 list_i = 1; // list int
}
repeated ListInt list_list_i = 1; // list list int
}
oneof value
{
bytes s = 2; // "string"
int64 i = 3; // "int"
float f = 4; // "float"
bool b = 5; // "bool"
bytes bt = 7;
ListValue list = 1; // any "list(...)"
NamedAttrs func = 10; // Used to support attr nesting
TensorDescriptor td = 11; // GeTensorDesc type
TensorDef t = 12; // GeTensor type
GraphDef g = 13; // Graph type
ListListInt list_list_int = 14; // List List Int type
int64 dt = 15; // ge::DataType
}
}
// A list of attr names and their values. The whole list is attached
// with a string name. E.g., MatMul[T=float].
message NamedAttrs
{
string name = 1;
map<string, AttrDef> attr = 2;
}
// Shape / dimension description, using row-major order
message ShapeDef
{
repeated int64 dim = 1; // Size of each dimension
}
// Multidimensional data description
message TensorDescriptor
{
string name = 1; // Optional parameter, tensor name
DataType dtype = 2; // tensor datatype
ShapeDef shape = 3; // Shape / dimension
string layout = 4; // Tensor format, e.g. "NCHW", "NHWC", "CHW", "ND"
bool has_out_attr = 9;
int64 size = 10;
int64 weight_size = 11;
bool reuse_input = 12;
bool output_tensor = 13;
string device_type = 14;
bool input_tensor = 15;
int64 real_dim_cnt = 16;
int64 reuse_input_index = 17;
int64 data_offset = 18;
int64 cmps_size = 19;
string cmps_tab = 20;
int64 cmps_tab_offset = 21;
map<string, AttrDef> attr = 5; // Set of extra parameter fields
}
// GeTensor definition
message TensorDef
{
TensorDescriptor desc = 1; // Tensor description
bytes data = 2; // Tensor data
}
// Operator description
message OpDef
{
string name = 1; // name
string type = 2; // type
repeated string input = 5; // input original op name + outgoing index. op_name:index
map<string, AttrDef> attr = 10; // Set of operator parameter fields
bool has_out_attr = 20;
int64 id = 21;
int64 stream_id = 22;
repeated string input_name = 23;
repeated string src_name = 24;
repeated int64 src_index = 25;
repeated string dst_name = 26;
repeated int64 dst_index = 27;
repeated int64 input_i = 28;
repeated int64 output_i = 29;
repeated int64 workspace = 30;
repeated int64 workspace_bytes = 31;
repeated bool is_input_const = 32;
repeated TensorDescriptor input_desc = 33;
repeated TensorDescriptor output_desc = 34;
repeated string subgraph_name = 35;
}
// Graph definition
message GraphDef
{
string name = 1; // name
repeated string input = 4; // Graph input
repeated string output = 5; // Graph output
repeated OpDef op = 6; // List of operators
map<string, AttrDef> attr = 11; // Extended field
}
// model definition
message ModelDef
{
string name = 1; // name
uint32 version = 2; // IR proto version
string custom_version = 3; // User model version number, passed in by the user
repeated GraphDef graph = 7; // Graph definition; graph[0] is the main graph of the model
map<string, AttrDef> attr = 11; // Extended field
}

+ 136
- 136
parser/tensorflow/proto/insert_op.proto View File

@@ -1,136 +1,136 @@
syntax = "proto3";
package domi;
message InsertNewOps {
repeated AippOpParams aipp_op = 1;
repeated MultiShapeOpParams multi_shape_op = 2;
}
message AippOpParams {
enum InputFormat {
UNDEFINED = 0;
YUV420SP_U8 = 1;
XRGB8888_U8 = 2;
RGB888_U8 = 3;
YUV400_U8 = 4;
NC1HWC0DI_FP16 = 5;
NC1HWC0DI_S8 = 6;
ARGB8888_U8 = 7;
YUYV_U8 = 8;
YUV422SP_U8 = 9;
AYUV444_U8 = 10;
RAW10 = 11;
RAW12 = 12;
RAW16 = 13;
RAW24 = 14;
RGB16 = 15;
RGB20 = 16;
RGB24 = 17;
RGB8_IR = 18;
RGB16_IR = 19;
RGB24_IR = 20;
}
enum AippMode {
undefined = 0;
static = 1;
dynamic = 2;
}
// AIPP mode: distinguishes static AIPP from dynamic AIPP
AippMode aipp_mode = 1;
// related_input_rank is required; integer, valid range >= 0 and <= the number of input Data ops, default 0.
// It selects which model input gets AIPP, e.g. for a model with two inputs, set related_input_rank to 1 to apply AIPP to the second input.
uint32 related_input_rank = 2;
// input_edge_idx is optional; integer, valid range >= 0.
// It selects which output edges of the Data op get AIPP; if unset, AIPP is applied to all output edges of the model input chosen by related_input_rank.
// Each value must be <= the number of output edges of the Data op.
repeated uint32 input_edge_idx = 3;
// [Begin] Dynamic AIPP parameters; ignored when static AIPP is configured
uint32 max_src_image_size = 4;
// Whether rotation is supported. Off by default; enabling it costs extra memory and performance
bool support_rotation = 5;
// [End] Dynamic AIPP parameters
// [Begin] Static AIPP parameters; ignored when dynamic AIPP is configured
InputFormat input_format = 51;
bool csc_switch = 52;
float cpadding_value = 53;
bool rbuv_swap_switch = 54;
bool ax_swap_switch = 55;
bool single_line_mode = 56;
int32 src_image_size_w = 57;
int32 src_image_size_h = 58;
bool crop = 59;
int32 load_start_pos_w = 60;
int32 load_start_pos_h = 61;
int32 crop_size_w = 62;
int32 crop_size_h = 63;
bool resize = 64;
int32 resize_output_w = 65;
int32 resize_output_h = 66;
bool padding = 67;
int32 left_padding_size = 68;
int32 right_padding_size = 69;
int32 top_padding_size = 70;
int32 bottom_padding_size = 71;
int32 mean_chn_0 = 10;
int32 mean_chn_1 = 11;
int32 mean_chn_2 = 12;
int32 mean_chn_3 = 19;
float min_chn_0 = 13;
float min_chn_1 = 14;
float min_chn_2 = 15;
float min_chn_3 = 20;
repeated float var_reci_chn_0 = 16;
repeated float var_reci_chn_1 = 17;
repeated float var_reci_chn_2 = 18;
repeated float var_reci_chn_3 = 21;
repeated int32 matrix_r0c0 = 30;
repeated int32 matrix_r0c1 = 31;
repeated int32 matrix_r0c2 = 32;
repeated int32 matrix_r1c0 = 33;
repeated int32 matrix_r1c1 = 34;
repeated int32 matrix_r1c2 = 35;
repeated int32 matrix_r2c0 = 36;
repeated int32 matrix_r2c1 = 37;
repeated int32 matrix_r2c2 = 38;
repeated int32 output_bias_0 = 39;
repeated int32 output_bias_1 = 40;
repeated int32 output_bias_2 = 41;
repeated int32 input_bias_0 = 42;
repeated int32 input_bias_1 = 43;
repeated int32 input_bias_2 = 44;
// [End] Static AIPP parameters
// The n number that is used for raw/rgbir data into f16 transformation.
// The transformation equation is x/(2^n). If set to 0, no transform is performed.
uint32 raw_rgbir_to_f16_n = 45;
}
message MultiShapeOpParams {
enum MultiShapeMode {
batch = 0; // dynamic batch
resolution = 1; // dynamic resolution, reserved for extension
}
MultiShapeMode mode = 1; // operator mode
uint32 related_input_rank = 2; // which model input the new op is inserted on
repeated uint32 batch_list = 11; // batch_list values; the entry count must be between 2 and 8
}

+ 2
- 2
parser/tensorflow/scope/scope_pass_manager.cc View File

@@ -15,7 +15,7 @@
*/

#include "parser/tensorflow/scope/scope_pass_manager.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "common/util/error_manager/error_manager.h"
#include "framework/common/debug/ge_log.h"
@@ -25,7 +25,7 @@
namespace ge {
shared_ptr<ScopeGraph> ScopePassManager::BuildScopeGraph(domi::tensorflow::GraphDef *graph_def) {
GE_CHK_BOOL_EXEC(graph_def != nullptr, return nullptr, "graph_def is nullptr");
scope_graph_ = ge::MakeShared<ScopeGraph>();
scope_graph_ = ge::parser::MakeShared<ScopeGraph>();
if (scope_graph_ == nullptr) {
GELOGE(FAILED, "Scope graph make shared failed.");
return nullptr;


+ 2
- 2
parser/tensorflow/tensorflow_constant_parser.cc View File

@@ -19,7 +19,7 @@
#include <memory>
#include <vector>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op/ge_op_utils.h"
#include "parser/common/op_def/constant_op.h"
#include "parser/common/op_def/ir_pb_converter.h"
@@ -68,7 +68,7 @@ Status TensorFlowConstantParser::ParseValue(const domi::tensorflow::NodeDef *nod

const domi::tensorflow::TensorProto &tensor = attr_value.tensor();

GeTensorPtr weight = ge::MakeShared<ge::GeTensor>();
GeTensorPtr weight = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(weight);
int64_t dataType = 0;
GE_CHK_BOOL_RET_STATUS(ge::AttrUtils::GetInt(opDesc, TENSORFLOW_ATTR_DTYPE, dataType), INTERNAL_ERROR,


+ 2
- 2
parser/tensorflow/tensorflow_fusion_op_parser.cc View File

@@ -17,7 +17,7 @@
#include "parser/tensorflow/tensorflow_fusion_op_parser.h"
#include <memory>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "framework/common/debug/ge_log.h"
#include "omg/omg.h"
@@ -132,7 +132,7 @@ Status TensorFlowFusionOpParser::ParseWeightFromConst(const NodeDef *node_def, g
GE_CHECK_NOTNULL(node_def);
TensorProto tensor;
GE_CHK_STATUS_RET(GetTensorFromNode(node_def, tensor), "get tensor failed.");
weight = ge::MakeShared<ge::GeTensor>();
weight = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(weight);
domi::tensorflow::DataType data_type = tensor.dtype();
GE_CHK_STATUS_RET(


+ 7
- 7
parser/tensorflow/tensorflow_parser.cc View File

@@ -19,7 +19,7 @@
#include <iostream>
#include "parser/common/convert/pb2json.h"
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util/error_manager/error_manager.h"
#include "external/graph/operator_factory.h"
#include "external/parser/tensorflow_parser.h"
@@ -99,7 +99,7 @@ graphStatus aclgrphParseTensorFlow(const char *model_file, ge::Graph &graph) {
(void)acl_graph_parse_util.AclParserInitialize(options);

// Create an empty computegraph
ge::ComputeGraphPtr compute_graph = ge::MakeShared<ge::ComputeGraph>("tmpGraph");
ge::ComputeGraphPtr compute_graph = ge::parser::MakeShared<ge::ComputeGraph>("tmpGraph");
GE_CHECK_NOTNULL(compute_graph);

graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph);
@@ -172,7 +172,7 @@ Status GenSubgraphParseTasks(const ge::ComputeGraphPtr &parent_graph, std::deque

// A function may be referenced multiple times in TF, change the graph name to ensure it is unique in GE
auto unique_name = node->GetName() + std::to_string(i) + subgraph_iname;
auto subgraph = ge::MakeShared<ge::ComputeGraph>(unique_name);
auto subgraph = ge::parser::MakeShared<ge::ComputeGraph>(unique_name);
if (subgraph == nullptr) {
GELOGE(OUT_OF_MEMORY, "Failed to alloc subgraph %s", subgraph_iname.c_str());
return OUT_OF_MEMORY;
@@ -246,7 +246,7 @@ Status TensorFlowModelParser::DefunToPartitionedCall(const domi::tensorflow::Nod
return FAILED;
}

op = ge::MakeShared<ge::OpDesc>(op_name, ge::parser::PARTITIONEDCALL);
op = ge::parser::MakeShared<ge::OpDesc>(op_name, ge::parser::PARTITIONEDCALL);
GE_CHECK_NOTNULL(op);

size_t input_tensor_num = 0;
@@ -284,7 +284,7 @@ Status TensorFlowModelParser::TransNodeToOpDesc(const domi::tensorflow::NodeDef
ge::Operator op_factory = ge::OperatorFactory::CreateOperator(node_name, op_type);
if (op_factory.GetName() != node_name || op_type == ge::parser::DATA) {
if (std::find(kMakeOperatorNotByIr.begin(), kMakeOperatorNotByIr.end(), op_type) != kMakeOperatorNotByIr.end()) {
op = ge::MakeShared<ge::OpDesc>(node_name, op_type);
op = ge::parser::MakeShared<ge::OpDesc>(node_name, op_type);
GE_CHECK_NOTNULL(op);
} else if (node_name == op_type) {
// Trans @tensorflow.python.framework.Defun(...) to PartitionedCall.
@@ -809,7 +809,7 @@ Status TensorFlowModelParser::ParseNodeDef(TensorFlowModelParser *parser, ge::Co
ge::Operator op_factory = ge::OperatorFactory::CreateOperator(node_name, op_type);
if (op_factory.GetName() != node_name) {
if (std::find(kMakeOperatorNotByIr.begin(), kMakeOperatorNotByIr.end(), op_type) != kMakeOperatorNotByIr.end()) {
op = ge::MakeShared<ge::OpDesc>(node_name, op_type);
op = ge::parser::MakeShared<ge::OpDesc>(node_name, op_type);
GE_CHECK_NOTNULL(op);
} else if (node_name == op_type) {
GE_RETURN_IF_ERROR(parser->DefunToPartitionedCall(node_def, op));
@@ -939,7 +939,7 @@ Status TensorFlowModelParser::AddFmkNode(ge::ComputeGraphPtr &graph, shared_ptr<
ThreadPool executor(kThreadNum);
std::mutex graphMutex;
std::vector<std::future<Status>> vectorFuture(op_node_list_size);
ge::ComputeGraphPtr graph_tmp = ge::MakeShared<ge::ComputeGraph>("tmpGraph");
ge::ComputeGraphPtr graph_tmp = ge::parser::MakeShared<ge::ComputeGraph>("tmpGraph");
GE_CHECK_NOTNULL(graph_tmp);
for (size_t j = 0; j < op_node_list_size; j++) {
const string op_node_name = op_node_name_list[j];
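The aclgrphParseTensorFlow entry point in the first hunk mirrors the aclgrphParseCaffe declaration added under inc/external. A minimal caller sketch; the model path is illustrative:

#include "external/parser/tensorflow_parser.h"
#include "graph/graph.h"

int main() {
  ge::Graph graph("tf_graph");
  const ge::graphStatus ret = ge::aclgrphParseTensorFlow("./frozen_model.pb", graph);
  if (ret != ge::GRAPH_SUCCESS) {
    return -1;  // details are in the GE log
  }
  return 0;  // graph now holds the parsed model
}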


+ 3
- 3
parser/tensorflow/tensorflow_parser_register.h View File

@@ -25,7 +25,7 @@
#include "framework/omg/parser/op_parser.h"
#include "parser/common/op_def/ir_pb_converter.h"
#include "parser/common/op_def/operator.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "parser/common/op_parser_factory.h"
#include "parser/tensorflow/tensorflow_op_parser.h"
#include "proto/tensorflow/node_def.pb.h"
@@ -72,7 +72,7 @@ class TensorflowParserBuilder : public TensorflowWeightParserBuilder {
}

bool Finalize() override {
auto op_parser_adapter = ge::MakeShared<TensorflowOpParserAdapter<Param>>(*this);
auto op_parser_adapter = ge::parser::MakeShared<TensorflowOpParserAdapter<Param>>(*this);
if (op_parser_adapter == nullptr) {
GELOGE(FAILED, "Op parser adapter is null.");
}
@@ -102,7 +102,7 @@ class TensorflowOpParserAdapter : public TensorFlowOpParser {
Status ParseParams(const Message *op_src, ge::OpDescPtr &op_dest) override {
const domi::tensorflow::NodeDef *node = static_cast<const domi::tensorflow::NodeDef *>(op_src);
GE_CHECK_NOTNULL(node);
std::shared_ptr<Param> param = ge::MakeShared<Param>();
std::shared_ptr<Param> param = ge::parser::MakeShared<Param>();
if (param == nullptr) {
GELOGE(domi::FAILED, "Param is null");
return domi::FAILED;


+ 2
- 2
parser/tensorflow/tensorflow_reshape_parser.cc View File

@@ -22,7 +22,7 @@
#include "graph/utils/type_utils.h"
#include "parser/common/op_parser_factory.h"
#include "parser/tensorflow/tensorflow_util.h"
#include "common/math/math_util.h"
#include "parser/common/acl_graph_parser_util.h"

using domi::TENSORFLOW;
using namespace ge::parser;
@@ -48,7 +48,7 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
real_size *= tmp_dim;
}
FMK_INT64_MULCHECK(real_size, size_type);
PARSER_INT64_MULCHECK(real_size, size_type);
ge::TensorUtils::SetSize(ge_desc, real_size * size_type);
ge::TensorUtils::SetRealDimCnt(ge_desc, ge_desc.GetShape().GetDimNum());
GELOGI("after translate tf_desc, datatype: %s, format: %s, real size: %u, size_type: %u",


+ 3
- 3
parser/tensorflow/tensorflow_squeeze_parser.cc View File

@@ -25,7 +25,7 @@
#include "framework/omg/parser/parser_inner_ctx.h"
#include "graph/utils/type_utils.h"
#include "parser/common/op_parser_factory.h"
#include "common/math/math_util.h"
#include "parser/common/acl_graph_parser_util.h"

using domi::tensorflow::AttrValue;
using std::vector;
@@ -52,10 +52,10 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc.GetShape().GetDim(j);
GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
FMK_INT64_MULCHECK(real_size, tmp_dim);
PARSER_INT64_MULCHECK(real_size, tmp_dim);
real_size *= tmp_dim;
}
FMK_INT64_MULCHECK(real_size, size_type);
PARSER_INT64_MULCHECK(real_size, size_type);
ge::TensorUtils::SetSize(ge_desc, real_size * size_type);
ge::TensorUtils::SetRealDimCnt(ge_desc, ge_desc.GetShape().GetDimNum());
GELOGD("after translate tf_desc, datatype: %s, format: %s, real size: %u, size_type: %u",
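Both ParseDesc hunks compute the element count the same way: a negative (unknown) dim contributes its absolute value, and every multiplication is overflow-checked first. A standalone restatement of that logic:

#include <cstdint>
#include <limits>
#include <vector>

// Returns false on int64 overflow; mirrors the loops in the hunks above.
bool ComputeTensorSize(const std::vector<int64_t> &dims, int64_t size_type,
                       int64_t &bytes) {
  int64_t real_size = 1;
  for (const int64_t dim : dims) {
    const int64_t d = dim < 0 ? -dim : dim;  // unknown dim: use |dim|
    if (d != 0 && real_size > std::numeric_limits<int64_t>::max() / d) {
      return false;  // the case PARSER_INT64_MULCHECK guards against
    }
    real_size *= d;
  }
  if (size_type != 0 &&
      real_size > std::numeric_limits<int64_t>::max() / size_type) {
    return false;
  }
  bytes = real_size * size_type;
  return true;
}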


+ 0
- 1
parser/tensorflow/tensorflow_util.cc View File

@@ -19,7 +19,6 @@
#include <cstdlib>
#include <iostream>
#include <memory>
#include "common/math/math_util.h"
#include "framework/common/debug/ge_log.h"
#include "framework/common/debug/log.h"
#include "framework/common/op/ge_op_utils.h"

