Browse Source

update parser code

pull/2/head
wqtshg 5 years ago
parent
commit
33ae412b87
18 changed files with 146 additions and 72 deletions
  1. +4
    -3
      parser/caffe/caffe_custom_parser_adapter.cc
  2. +31
    -29
      parser/caffe/caffe_parser.cc
  3. +2
    -2
      parser/caffe/caffe_reshape_parser.cc
  4. +71
    -0
      parser/common/acl_graph_parser_util.h
  5. +4
    -4
      parser/common/data_op_parser.cc
  6. +4
    -4
      parser/common/op_parser_factory.h
  7. +3
    -3
      parser/common/register_tbe.cc
  8. +2
    -2
      parser/common/thread_pool.h
  9. +1
    -1
      parser/onnx/onnx_constant_parser.cc
  10. +3
    -2
      parser/tensorflow/graph_optimizer.cc
  11. +2
    -2
      parser/tensorflow/scope/scope_pass_manager.cc
  12. +2
    -2
      parser/tensorflow/tensorflow_constant_parser.cc
  13. +2
    -2
      parser/tensorflow/tensorflow_fusion_op_parser.cc
  14. +7
    -7
      parser/tensorflow/tensorflow_parser.cc
  15. +3
    -3
      parser/tensorflow/tensorflow_parser_register.h
  16. +2
    -2
      parser/tensorflow/tensorflow_reshape_parser.cc
  17. +3
    -3
      parser/tensorflow/tensorflow_squeeze_parser.cc
  18. +0
    -1
      parser/tensorflow/tensorflow_util.cc

+ 4
- 3
parser/caffe/caffe_custom_parser_adapter.cc View File

@@ -18,7 +18,7 @@
#include <memory>
#include <vector>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "framework/common/debug/ge_log.h"
#include "framework/omg/omg_inner_types.h"
@@ -84,8 +84,9 @@ Status CaffeCustomParserAdapter::ParseWeights(const Message *op_src, ge::NodePtr

bool bias_en = false;
int start_pos = layer->bottom_size();
bool update_in_turn = (static_cast<int64_t>(op->GetAllInputsSize()) == (layer->bottom_size() + layer->blobs_size()));
for (int i = 0; i < layer->blobs_size(); ++i) {
ge::GeTensorPtr weight = ge::MakeShared<ge::GeTensor>();
ge::GeTensorPtr weight = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(weight);
GE_CHK_STATUS_RET(ConvertWeight(layer->blobs(i), layer->name(), weight), "Convert blobs(%d) for layer %s failed", i,
layer->name().c_str());
@@ -120,7 +121,7 @@ Status CaffeCustomParserAdapter::ParseWeights(const Message *op_src, ge::NodePtr
GE_CHECK_NOTNULL(const_node);
auto index = start_pos + i;
auto valid_input_name = op->GetValidInputNameByIndex(static_cast<uint32_t>(index));
if (valid_input_name.empty()) {
if (update_in_turn || valid_input_name.empty()) {
if (node->AddLinkFrom(static_cast<const uint32_t &>(index), const_node) != GRAPH_SUCCESS) {
GELOGE(GRAPH_FAILED, "AddEdge failed of from Node %s output to Node %s input %d", const_node->GetName().c_str(),
node->GetName().c_str(), index);


+ 31
- 29
parser/caffe/caffe_parser.cc View File

@@ -22,7 +22,7 @@
#include <memory>
#include "parser/common/convert/pb2json.h"
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op_map.h"
#include "common/util/error_manager/error_manager.h"
#include "common/ge_types.h"
@@ -85,7 +85,7 @@ graphStatus aclgrphParseCaffe(const char *model_file, const char *weights_file,
(void)acl_graph_parse_util.AclParserInitialize(options);

// Create an empty computegraph
ge::ComputeGraphPtr compute_graph = ge::MakeShared<ge::ComputeGraph>("tmpGraph");
ge::ComputeGraphPtr compute_graph = ge::parser::MakeShared<ge::ComputeGraph>("tmpGraph");
GE_CHECK_NOTNULL(compute_graph);

graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph);
@@ -1119,7 +1119,7 @@ Status CaffeModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const dom
Status CaffeModelParser::AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer,
const string &op_type) {
if (std::find(kAddTensorIrSkipNodes.begin(), kAddTensorIrSkipNodes.end(), op_type) != kAddTensorIrSkipNodes.end()) {
op_desc = ge::MakeShared<ge::OpDesc>(layer.name(), op_type);
op_desc = ge::parser::MakeShared<ge::OpDesc>(layer.name(), op_type);
GE_CHECK_NOTNULL(op_desc);
Status ret = AddTensorDescToOpDesc(op_desc, layer);
if (ret != SUCCESS) {
@@ -1138,44 +1138,40 @@ Status CaffeModelParser::AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const
} else {
op_desc = ge::OpDescUtils::GetOpDescFromOperator(op_factory);
GE_CHECK_NOTNULL(op_desc);
auto valid_size = layer.bottom_size();
auto valid_input_size = layer.bottom_size();
auto blob_size = layer.blobs_size();
GELOGI("After GetOpDescFromOperator op[%s] type[%s] have all input size: %zu, caffe_input_size:%d output size: %zu",
op_desc->GetName().c_str(), op_desc->GetType().c_str(),
op_desc->GetAllInputsSize(), valid_size, op_desc->GetOutputsSize());
for (int i = 0; i < valid_size; i++) {
op_desc->GetAllInputsSize(), valid_input_size, op_desc->GetOutputsSize());
bool update_in_turn = (static_cast<int64_t>(op_desc->GetAllInputsSize()) == (valid_input_size + blob_size));
for (int i = 0; i < valid_input_size; i++) {
ge::GeTensorDesc input_tensor;
std::string input_name;
ge::graphStatus ret = ge::GRAPH_SUCCESS;
// Only two case is supported fow now when there are optional inputs
// Below cases are supported for now when there are optional inputs
// x means optional, o means required input
// a. ooxxx, layer.bottom_size=number of o and x
// b. oxoxoxox, layer.bottom_size=number of o
if (static_cast<size_t>(i) >= op_desc->GetInputsSize()) {
ret = op_desc->UpdateInputDesc(static_cast<uint32_t>(i), input_tensor);
} else {
input_name = op_desc->GetValidInputNameByIndex(static_cast<uint32_t>(i));
ret = op_desc->UpdateInputDesc(input_name, input_tensor);
}

if (ret != ge::GRAPH_SUCCESS) {
GELOGW("op [%s], type[%s], update input(%d) with name %s failed", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, input_name.c_str());
// a. ooxxx, number of o and x>=layer.bottom_size+layer.blobs_size>=number of o
// b. oxoxoxox, layer.bottom_size+layer.blobs_size>=number of o
// c. oxoxoxox, layer.bottom_size+layer.blobs_size>=number of o and x
if (update_in_turn) {
ret = op_desc->UpdateInputDesc(op_desc->GetInputNameByIndex(static_cast<uint32_t>(i)), input_tensor);
} else {
GELOGI("op [%s], type[%s], update input(%d) with name %s success", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, input_name.c_str());
if (static_cast<size_t>(i) >= op_desc->GetInputsSize()) {
ret = op_desc->UpdateInputDesc(static_cast<uint32_t>(i), input_tensor);
} else {
input_name = op_desc->GetValidInputNameByIndex(static_cast<uint32_t>(i));
ret = op_desc->UpdateInputDesc(input_name, input_tensor);
}
}
GELOGI("op [%s], type[%s], update input(%d) with name %s %s", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, input_name.c_str(), ret == ge::GRAPH_SUCCESS ? "success" : "failed");
}

for (int i = 0; i < layer.top_size(); i++) {
ge::GeTensorDesc output_tensor;
ge::graphStatus ret = op_desc->UpdateOutputDesc(op_desc->GetOutputNameByIndex(i), output_tensor);
if (ret != ge::GRAPH_SUCCESS) {
GELOGW("op [%s], type[%s], update output(%d) with name %s failed", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, op_desc->GetOutputNameByIndex(i).c_str());
} else {
GELOGI("op [%s], type[%s], update output(%d) with name %s success", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, op_desc->GetOutputNameByIndex(i).c_str());
}
auto ret = op_desc->UpdateOutputDesc(op_desc->GetOutputNameByIndex(static_cast<uint32_t>(i)), output_tensor);
GELOGI("op [%s], type[%s], update output(%d) with name %s %s", op_desc->GetName().c_str(),
op_desc->GetType().c_str(), i, op_desc->GetOutputNameByIndex(i).c_str(), ret == ge::GRAPH_SUCCESS ? "success" : "failed");
}
}
return SUCCESS;
@@ -1226,6 +1222,12 @@ Status CaffeModelParser::AddEdges(ge::ComputeGraphPtr &graph) {
GELOGE(INTERNAL_ERROR, "Add link failed from op[%s] to op[%s].",
top_node_iter->second->GetName().c_str(), bottom_node_iter->second->GetName().c_str());
return INTERNAL_ERROR;);
auto op_desc = bottom_node_iter->second->GetOpDesc();
GE_CHECK_NOTNULL(op_desc);
auto out_op_desc = top_node_iter->second->GetOpDesc();
GE_CHECK_NOTNULL(out_op_desc);
(void) op_desc->UpdateInputDesc((static_cast<uint32_t>(in_archor_ptr->GetIdx())),
out_op_desc->GetOutputDesc(static_cast<uint32_t>(out_archor_ptr->GetIdx())));
}
GE_IF_BOOL_EXEC(top_node_iter == node_map.end(), ErrorManager::GetInstance().ATCReportErrMessage(
"E11014", {"opname"}, {top_blob_layer_pair.first});


+ 2
- 2
parser/caffe/caffe_reshape_parser.cc View File

@@ -17,7 +17,7 @@
#include "parser/caffe/caffe_reshape_parser.h"
#include <vector>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op/op_parser_util.h"
#include "common/util.h"
#include "framework/common/debug/ge_log.h"
@@ -109,7 +109,7 @@ Status CaffeReshapeParser::AddConstInput(ge::NodePtr &node) {
}

// construct GeTensorPtr
ge::GeTensorPtr constTensor = ge::MakeShared<ge::GeTensor>();
ge::GeTensorPtr constTensor = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(constTensor);
constTensor->SetTensorDesc(const_desc);



+ 71
- 0
parser/common/acl_graph_parser_util.h View File

@@ -137,6 +137,77 @@ bool ValidateStr(const std::string &filePath, const std::string &mode);
/// @return Time character string in the format: %Y%m%d%H%M%S, eg: 20171011083555
///
std::string CurrentTimeInStr();

/// @brief Nothrow make_shared helper: constructs T with new(std::nothrow) so an
/// allocation failure yields a nullptr result instead of a std::bad_alloc throw.
/// @return shared_ptr owning the new object, or nullptr on allocation failure.
template <typename T, typename... Args>
static inline std::shared_ptr<T> ComGraphMakeShared(Args &&... args) {
  // Strip const so the object can be constructed, then hand it to a
  // shared_ptr<T> (which may re-expose it as const).
  using NonConstT = typename std::remove_const<T>::type;
  return std::shared_ptr<T>(new (std::nothrow) NonConstT(std::forward<Args>(args)...));
}

/// @ingroup math_util
/// @brief check whether int64 multiplication can result in overflow
/// @param [in] a multiplicator
/// @param [in] b multiplicator
/// @return Status
inline Status Int64MulCheckOverflow(int64_t a, int64_t b) {
  // Decide per sign combination whether a * b would leave the int64 range,
  // comparing against the limits via division so the check itself cannot overflow.
  if (a > 0) {
    if (b > 0) {
      // positive * positive could exceed INT64_MAX
      return (a > (INT64_MAX / b)) ? FAILED : SUCCESS;
    }
    // positive * non-positive could fall below INT64_MIN
    return (b < (INT64_MIN / a)) ? FAILED : SUCCESS;
  }
  if (b > 0) {
    // non-positive * positive could fall below INT64_MIN
    return (a < (INT64_MIN / b)) ? FAILED : SUCCESS;
  }
  // non-positive * non-positive could exceed INT64_MAX; a == 0 never overflows
  return ((a != 0) && (b < (INT64_MAX / a))) ? FAILED : SUCCESS;
}
/// @ingroup math_util
/// @brief check whether int64 multiplication can result in overflow
/// @param [in] a multiplicator
/// @param [in] b multiplicator
/// @return Status
inline Status CheckInt64Uint32MulOverflow(int64_t a, uint32_t b) {
  // A zero factor on either side can never overflow.
  if ((a == 0) || (b == 0)) {
    return SUCCESS;
  }
  if (a > 0) {
    // positive product would exceed INT64_MAX
    return (a > (INT64_MAX / b)) ? FAILED : SUCCESS;
  }
  // negative product would fall below INT64_MIN
  return (a < (INT64_MIN / b)) ? FAILED : SUCCESS;
}

// Guard macro: verify (a) * (b) fits in int64 before multiplying; on overflow,
// log a warning and make the enclosing function return INTERNAL_ERROR.
// NOTE(review): Int64MulCheckOverflow is declared in namespace ge::parser (see
// the enclosing namespace of this header), so the call must be qualified with
// ge::parser:: — the bare ge:: qualification would not find it.
#define PARSER_INT64_MULCHECK(a, b)                                                             \
  if (ge::parser::Int64MulCheckOverflow((a), (b)) != SUCCESS) {                                 \
    GELOGW("Int64 %ld and %ld multiplication can result in overflow!", static_cast<int64_t>(a), \
           static_cast<int64_t>(b));                                                            \
    return INTERNAL_ERROR;                                                                      \
  }

// Guard macro: verify int64 (a) * uint32 (b) fits in int64 before multiplying;
// on overflow, log a warning and make the enclosing function return INTERNAL_ERROR.
// Fixes: the first logged value is printed with %ld, so it must be cast to
// int64_t — the original static_cast<uint32_t>(a) mismatched the format
// specifier (undefined behavior in printf-style formatting). The helper is
// declared in namespace ge::parser, so the call is qualified accordingly.
#define PARSER_INT64_UINT32_MULCHECK(a, b)                                      \
  if (ge::parser::CheckInt64Uint32MulOverflow((a), (b)) != SUCCESS) {           \
    GELOGW("Int64 %ld and UINT32 %u multiplication can result in overflow!",    \
           static_cast<int64_t>(a), static_cast<uint32_t>(b));                  \
    return INTERNAL_ERROR;                                                      \
  }
} // namespace parser
} // namespace ge



+ 4
- 4
parser/common/data_op_parser.cc View File

@@ -18,7 +18,7 @@
#include <cstdlib>
#include "common/debug/log.h"
#include "common/op/ge_op_utils.h"
#include "common/math/math_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "graph/utils/type_utils.h"
#include "omg/omg.h"
@@ -128,10 +128,10 @@ Status DataOpParser::InitNDTensor(const vector<int64_t> &shape, ge::DataType dat
}
uint32_t type_size = 0;
if (ge::TypeUtils::GetDataTypeLength(data_type, type_size)) {
FMK_INT64_UINT32_MULCHECK(size, type_size);
PARSER_INT64_UINT32_MULCHECK(size, type_size);
size *= type_size;
} else {
FMK_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
PARSER_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
size *= sizeof(float);
}
ge::TensorUtils::SetSize(tensor_desc, size);
@@ -169,7 +169,7 @@ Status DataOpParser::InitInputTensor(const vector<int64_t> &shape, ge::GeTensorD
if (input.GetShape().GetDim(0) != -1) {
size = input.GetShape().GetShapeSize();
}
FMK_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
PARSER_INT64_UINT32_MULCHECK(size, static_cast<uint32_t>(sizeof(float)));
ge::TensorUtils::SetSize(input, size * sizeof(float));

return SUCCESS;


+ 4
- 4
parser/common/op_parser_factory.h View File

@@ -23,7 +23,7 @@
#include <mutex>
#include <string>
#include <vector>
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "framework/omg/parser/parser_types.h"
#include "framework/common/debug/ge_log.h"
#include "omg/omg_inner_types.h"
@@ -162,7 +162,7 @@ class CustomParserAdapterRegistrar {
*/
#define REGISTER_OP_PARSER_CREATOR(framework, op_type, clazz) \
std::shared_ptr<OpParser> Creator_##framework##_##op_type##_Op_Parser() { \
std::shared_ptr<clazz> ptr = ge::MakeShared<clazz>(); \
std::shared_ptr<clazz> ptr = ge::parser::MakeShared<clazz>(); \
if (ptr == nullptr) { \
GELOGW("MakeShared failed, result is nullptr."); \
} \
@@ -173,7 +173,7 @@ class CustomParserAdapterRegistrar {

#define REGISTER_FUSION_OP_PARSER_CREATOR(framework, op_type, clazz) \
std::shared_ptr<OpParser> Creator_##framework##_##op_type##_Fusion_Op_Parser() { \
std::shared_ptr<clazz> ptr = ge::MakeShared<clazz>(); \
std::shared_ptr<clazz> ptr = ge::parser::MakeShared<clazz>(); \
if (ptr == nullptr) { \
GELOGW("MakeShared failed, result is nullptr."); \
} \
@@ -187,7 +187,7 @@ class CustomParserAdapterRegistrar {
/// @param [in] clazz CaffeCustomParserAdapter adaptation class
#define REGISTER_CUSTOM_PARSER_ADAPTER_CREATOR(framework, clazz) \
std::shared_ptr<OpParser> Creator_##framework##_Op_Parser_Adapter() { \
std::shared_ptr<clazz> ptr = ge::MakeShared<clazz>(); \
std::shared_ptr<clazz> ptr = ge::parser::MakeShared<clazz>(); \
if (ptr == nullptr) { \
GELOGW("MakeShared failed, result is nullptr."); \
} \


+ 3
- 3
parser/common/register_tbe.cc View File

@@ -19,7 +19,7 @@
#include <memory>
#include <string>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op/ge_op_utils.h"
#include "common/op_map.h"
#include "common/util.h"
@@ -79,7 +79,7 @@ bool OpRegistrationTbe::RegisterParser(const OpRegistrationData &reg_data) {
return false;
}
std::shared_ptr<TensorFlowCustomParserAdapter> tf_parser_adapter =
ge::MakeShared<TensorFlowCustomParserAdapter>();
ge::parser::MakeShared<TensorFlowCustomParserAdapter>();
if (tf_parser_adapter == nullptr) {
GELOGE(PARAM_INVALID, "Create tf parser adapter failed.");
return false;
@@ -95,7 +95,7 @@ bool OpRegistrationTbe::RegisterParser(const OpRegistrationData &reg_data) {
}
GELOGI("Register fusion custom op parser: %s", reg_data.GetOmOptype().c_str());
std::shared_ptr<TensorFlowFusionCustomParserAdapter> tf_fusion_parser_adapter =
ge::MakeShared<TensorFlowFusionCustomParserAdapter>();
ge::parser::MakeShared<TensorFlowFusionCustomParserAdapter>();
if (tf_fusion_parser_adapter == nullptr) {
GELOGE(PARAM_INVALID, "Create tf fusion parser adapter failed.");
return false;


+ 2
- 2
parser/common/thread_pool.h View File

@@ -32,7 +32,7 @@
#include "framework/common/ge_inner_error_codes.h"
#include "external/ge/ge_api_error_codes.h"
#include "graph/types.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"

namespace ge {
using ThreadTask = std::function<void()>;
@@ -53,7 +53,7 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY ThreadPool {
}

auto bindFunc = std::bind(std::forward<Func>(func), std::forward<Args>(args)...);
auto task = ge::MakeShared<std::packaged_task<retType()>>(bindFunc);
auto task = ge::parser::MakeShared<std::packaged_task<retType()>>(bindFunc);
if (task == nullptr) {
GELOGE(ge::FAILED, "Make shared failed.");
return fail_future;


+ 1
- 1
parser/onnx/onnx_constant_parser.cc View File

@@ -17,7 +17,7 @@
#include "onnx_constant_parser.h"
#include <map>
#include <vector>
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "framework/omg/parser/parser_inner_ctx.h"
#include "graph/ge_tensor.h"


+ 3
- 2
parser/tensorflow/graph_optimizer.cc View File

@@ -23,7 +23,7 @@
#include "cce/cce.h"
#include "cce/dnn.h"
#include "common/debug/log.h"
#include "common/math/math_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op/ge_op_utils.h"
#include "common/op_map.h"
#include "common/types_map.h"
@@ -807,7 +807,8 @@ Status CreateNodeDefBytes(ge::NodePtr n, string originalType, map<string, PIOLis
for (uint32_t j = 0; j < ge_desc->GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc->GetShape().GetDim(j);
GE_CHECK_GE(tmp_dim, 0);
FMK_INT64_MULCHECK(real_size, tmp_dim);
PARSER_INT64_MULCHECK(real_size, tmp_dim);
real_size *= tmp_dim;
}
ge::TensorUtils::SetSize(*ge_desc, real_size * size_type);


+ 2
- 2
parser/tensorflow/scope/scope_pass_manager.cc View File

@@ -15,7 +15,7 @@
*/

#include "parser/tensorflow/scope/scope_pass_manager.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "common/util/error_manager/error_manager.h"
#include "framework/common/debug/ge_log.h"
@@ -25,7 +25,7 @@
namespace ge {
shared_ptr<ScopeGraph> ScopePassManager::BuildScopeGraph(domi::tensorflow::GraphDef *graph_def) {
GE_CHK_BOOL_EXEC(graph_def != nullptr, return nullptr, "graph_def is nullptr");
scope_graph_ = ge::MakeShared<ScopeGraph>();
scope_graph_ = ge::parser::MakeShared<ScopeGraph>();
if (scope_graph_ == nullptr) {
GELOGE(FAILED, "Scope graph make shared failed.");
return nullptr;


+ 2
- 2
parser/tensorflow/tensorflow_constant_parser.cc View File

@@ -19,7 +19,7 @@
#include <memory>
#include <vector>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/op/ge_op_utils.h"
#include "parser/common/op_def/constant_op.h"
#include "parser/common/op_def/ir_pb_converter.h"
@@ -68,7 +68,7 @@ Status TensorFlowConstantParser::ParseValue(const domi::tensorflow::NodeDef *nod

const domi::tensorflow::TensorProto &tensor = attr_value.tensor();

GeTensorPtr weight = ge::MakeShared<ge::GeTensor>();
GeTensorPtr weight = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(weight);
int64_t dataType = 0;
GE_CHK_BOOL_RET_STATUS(ge::AttrUtils::GetInt(opDesc, TENSORFLOW_ATTR_DTYPE, dataType), INTERNAL_ERROR,


+ 2
- 2
parser/tensorflow/tensorflow_fusion_op_parser.cc View File

@@ -17,7 +17,7 @@
#include "parser/tensorflow/tensorflow_fusion_op_parser.h"
#include <memory>
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util.h"
#include "framework/common/debug/ge_log.h"
#include "omg/omg.h"
@@ -132,7 +132,7 @@ Status TensorFlowFusionOpParser::ParseWeightFromConst(const NodeDef *node_def, g
GE_CHECK_NOTNULL(node_def);
TensorProto tensor;
GE_CHK_STATUS_RET(GetTensorFromNode(node_def, tensor), "get tensor failed.");
weight = ge::MakeShared<ge::GeTensor>();
weight = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(weight);
domi::tensorflow::DataType data_type = tensor.dtype();
GE_CHK_STATUS_RET(


+ 7
- 7
parser/tensorflow/tensorflow_parser.cc View File

@@ -19,7 +19,7 @@
#include <iostream>
#include "parser/common/convert/pb2json.h"
#include "common/debug/log.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "common/util/error_manager/error_manager.h"
#include "external/graph/operator_factory.h"
#include "external/parser/tensorflow_parser.h"
@@ -99,7 +99,7 @@ graphStatus aclgrphParseTensorFlow(const char *model_file, ge::Graph &graph) {
(void)acl_graph_parse_util.AclParserInitialize(options);

// Create an empty computegraph
ge::ComputeGraphPtr compute_graph = ge::MakeShared<ge::ComputeGraph>("tmpGraph");
ge::ComputeGraphPtr compute_graph = ge::parser::MakeShared<ge::ComputeGraph>("tmpGraph");
GE_CHECK_NOTNULL(compute_graph);

graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph);
@@ -172,7 +172,7 @@ Status GenSubgraphParseTasks(const ge::ComputeGraphPtr &parent_graph, std::deque

// A function may be referenced multiple times in TF, change the graph name to ensure it is unique in GE
auto unique_name = node->GetName() + std::to_string(i) + subgraph_iname;
auto subgraph = ge::MakeShared<ge::ComputeGraph>(unique_name);
auto subgraph = ge::parser::MakeShared<ge::ComputeGraph>(unique_name);
if (subgraph == nullptr) {
GELOGE(OUT_OF_MEMORY, "Failed to alloc subgraph %s", subgraph_iname.c_str());
return OUT_OF_MEMORY;
@@ -246,7 +246,7 @@ Status TensorFlowModelParser::DefunToPartitionedCall(const domi::tensorflow::Nod
return FAILED;
}

op = ge::MakeShared<ge::OpDesc>(op_name, ge::parser::PARTITIONEDCALL);
op = ge::parser::MakeShared<ge::OpDesc>(op_name, ge::parser::PARTITIONEDCALL);
GE_CHECK_NOTNULL(op);

size_t input_tensor_num = 0;
@@ -284,7 +284,7 @@ Status TensorFlowModelParser::TransNodeToOpDesc(const domi::tensorflow::NodeDef
ge::Operator op_factory = ge::OperatorFactory::CreateOperator(node_name, op_type);
if (op_factory.GetName() != node_name || op_type == ge::parser::DATA) {
if (std::find(kMakeOperatorNotByIr.begin(), kMakeOperatorNotByIr.end(), op_type) != kMakeOperatorNotByIr.end()) {
op = ge::MakeShared<ge::OpDesc>(node_name, op_type);
op = ge::parser::MakeShared<ge::OpDesc>(node_name, op_type);
GE_CHECK_NOTNULL(op);
} else if (node_name == op_type) {
// Trans @tensorflow.python.framework.Defun(...) to PartitionedCall.
@@ -809,7 +809,7 @@ Status TensorFlowModelParser::ParseNodeDef(TensorFlowModelParser *parser, ge::Co
ge::Operator op_factory = ge::OperatorFactory::CreateOperator(node_name, op_type);
if (op_factory.GetName() != node_name) {
if (std::find(kMakeOperatorNotByIr.begin(), kMakeOperatorNotByIr.end(), op_type) != kMakeOperatorNotByIr.end()) {
op = ge::MakeShared<ge::OpDesc>(node_name, op_type);
op = ge::parser::MakeShared<ge::OpDesc>(node_name, op_type);
GE_CHECK_NOTNULL(op);
} else if (node_name == op_type) {
GE_RETURN_IF_ERROR(parser->DefunToPartitionedCall(node_def, op));
@@ -939,7 +939,7 @@ Status TensorFlowModelParser::AddFmkNode(ge::ComputeGraphPtr &graph, shared_ptr<
ThreadPool executor(kThreadNum);
std::mutex graphMutex;
std::vector<std::future<Status>> vectorFuture(op_node_list_size);
ge::ComputeGraphPtr graph_tmp = ge::MakeShared<ge::ComputeGraph>("tmpGraph");
ge::ComputeGraphPtr graph_tmp = ge::parser::MakeShared<ge::ComputeGraph>("tmpGraph");
GE_CHECK_NOTNULL(graph_tmp);
for (size_t j = 0; j < op_node_list_size; j++) {
const string op_node_name = op_node_name_list[j];


+ 3
- 3
parser/tensorflow/tensorflow_parser_register.h View File

@@ -25,7 +25,7 @@
#include "framework/omg/parser/op_parser.h"
#include "parser/common/op_def/ir_pb_converter.h"
#include "parser/common/op_def/operator.h"
#include "common/ge/ge_util.h"
#include "parser/common/acl_graph_parser_util.h"
#include "parser/common/op_parser_factory.h"
#include "parser/tensorflow/tensorflow_op_parser.h"
#include "proto/tensorflow/node_def.pb.h"
@@ -72,7 +72,7 @@ class TensorflowParserBuilder : public TensorflowWeightParserBuilder {
}

bool Finalize() override {
auto op_parser_adapter = ge::MakeShared<TensorflowOpParserAdapter<Param>>(*this);
auto op_parser_adapter = ge::parser::MakeShared<TensorflowOpParserAdapter<Param>>(*this);
if (op_parser_adapter == nullptr) {
GELOGE(FAILED, "Op parser adapter is null.");
}
@@ -102,7 +102,7 @@ class TensorflowOpParserAdapter : public TensorFlowOpParser {
Status ParseParams(const Message *op_src, ge::OpDescPtr &op_dest) override {
const domi::tensorflow::NodeDef *node = static_cast<const domi::tensorflow::NodeDef *>(op_src);
GE_CHECK_NOTNULL(node);
std::shared_ptr<Param> param = ge::MakeShared<Param>();
std::shared_ptr<Param> param = ge::parser::MakeShared<Param>();
if (param == nullptr) {
GELOGE(domi::FAILED, "Param is null");
return domi::FAILED;


+ 2
- 2
parser/tensorflow/tensorflow_reshape_parser.cc View File

@@ -22,7 +22,7 @@
#include "graph/utils/type_utils.h"
#include "parser/common/op_parser_factory.h"
#include "parser/tensorflow/tensorflow_util.h"
#include "common/math/math_util.h"
#include "parser/common/acl_graph_parser_util.h"

using domi::TENSORFLOW;
using namespace ge::parser;
@@ -48,7 +48,7 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
real_size *= tmp_dim;
}
FMK_INT64_MULCHECK(real_size, size_type);
PARSER_INT64_MULCHECK(real_size, size_type);
ge::TensorUtils::SetSize(ge_desc, real_size * size_type);
ge::TensorUtils::SetRealDimCnt(ge_desc, ge_desc.GetShape().GetDimNum());
GELOGI("after translate tf_desc, datatype: %s, format: %s, real size: %u, size_type: %u",


+ 3
- 3
parser/tensorflow/tensorflow_squeeze_parser.cc View File

@@ -25,7 +25,7 @@
#include "framework/omg/parser/parser_inner_ctx.h"
#include "graph/utils/type_utils.h"
#include "parser/common/op_parser_factory.h"
#include "common/math/math_util.h"
#include "parser/common/acl_graph_parser_util.h"

using domi::tensorflow::AttrValue;
using std::vector;
@@ -52,10 +52,10 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc.GetShape().GetDim(j);
GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
FMK_INT64_MULCHECK(real_size, tmp_dim);
PARSER_INT64_MULCHECK(real_size, tmp_dim);
real_size *= tmp_dim;
}
FMK_INT64_MULCHECK(real_size, size_type);
PARSER_INT64_MULCHECK(real_size, size_type);
ge::TensorUtils::SetSize(ge_desc, real_size * size_type);
ge::TensorUtils::SetRealDimCnt(ge_desc, ge_desc.GetShape().GetDimNum());
GELOGD("after translate tf_desc, datatype: %s, format: %s, real size: %u, size_type: %u",


+ 0
- 1
parser/tensorflow/tensorflow_util.cc View File

@@ -19,7 +19,6 @@
#include <cstdlib>
#include <iostream>
#include <memory>
#include "common/math/math_util.h"
#include "framework/common/debug/ge_log.h"
#include "framework/common/debug/log.h"
#include "framework/common/op/ge_op_utils.h"


Loading…
Cancel
Save