Browse Source

parser st and ut

pull/433/head
jwx930962 4 years ago
parent
commit
63db8bbf1a
15 changed files with 1368 additions and 99 deletions
  1. +1
    -1
      tests/st/testcase/test_caffe_parser.cc
  2. +0
    -9
      tests/st/testcase/test_tensorflow_parser.cc
  3. +88
    -63
      tests/ut/parser/parser_ut_utils.cc
  4. +3
    -0
      tests/ut/parser/parser_ut_utils.h
  5. BIN
      tests/ut/parser/testcase/caffe_parser_testcase/caffe_model/caffe_add.caffemodel
  6. +36
    -0
      tests/ut/parser/testcase/caffe_parser_testcase/caffe_model/caffe_add.caffemodel.txt
  7. +28
    -0
      tests/ut/parser/testcase/caffe_parser_testcase/caffe_model/caffe_add.pbtxt
  8. +676
    -0
      tests/ut/parser/testcase/caffe_parser_testcase/caffe_parser_unittest.cc
  9. BIN
      tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_clip_v9.onnx
  10. +28
    -0
      tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_clip_v9.py
  11. BIN
      tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_const_type.onnx
  12. BIN
      tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_conv2d.onnx
  13. BIN
      tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_if.onnx
  14. +116
    -0
      tests/ut/parser/testcase/onnx_parser_testcase/onnx_parser_unittest.cc
  15. +392
    -26
      tests/ut/parser/testcase/tensorflow_parser_testcase/tensorflow_parser_unittest.cc

+ 1
- 1
tests/st/testcase/test_caffe_parser.cc View File

@@ -112,7 +112,7 @@ TEST_F(STestCaffeParser, caffe_parser_user_output_with_default) {
EXPECT_EQ(net_out_name.at(0), "abs:0:abs_out");
}

TEST_F(STestCaffeParser, acal_caffe_parser) {
TEST_F(STestCaffeParser, acl_caffe_parser) {
std::string case_dir = __FILE__;
case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
std::string model_file = case_dir + "/origin_models/caffe_add.pbtxt";


+ 0
- 9
tests/st/testcase/test_tensorflow_parser.cc View File

@@ -3926,15 +3926,6 @@ TEST_F(STestTensorflowParser, parser_FindFmkNodeCluser_success)
});
Status ret = graphOptimizer.FindFmkNodeCluser(node_cluser_Map);
EXPECT_EQ(ret, SUCCESS);

// node = AddNode(graph, "K", parser::NETOUTPUT, 1, 1);
// NodePtr node_a = AddNode(graph, "A", parser::NETOUTPUT, 1, 1);
// NodePtr node_b = AddNode(graph, "B", parser::NETOUTPUT, 1, 1);
// std::unordered_map<string, vector<NodePtr>> node_cluser_Map2({
// {"x", {node, node_a, node_b}},
// });
// ret = graphOptimizer.FindFmkNodeCluser(node_cluser_Map2);
// EXPECT_EQ(ret, SUCCESS);
}

TEST_F(STestTensorflowParser, parser_RebuildOutputAnchors_test)


+ 88
- 63
tests/ut/parser/parser_ut_utils.cc View File

@@ -17,6 +17,9 @@
#include "ut/parser/parser_ut_utils.h"

#include <limits.h>

#include <new>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include "framework/common/debug/ge_log.h"
#include "graph/utils/graph_utils.h"

namespace ge {
@@ -43,6 +46,91 @@ void ParerUTestsUtils::ClearParserInnerCtx() {
ge::GetParserContext().enable_scope_fusion_passes = "";
GELOGI("Clear parser inner context successfully.");
}

// Reads the whole file at |path| into a newly allocated MemBuffer.
// Returns nullptr on any failure (bad/overlong path, unreadable or empty
// file, allocation failure). The caller owns the result and must
// free(membuf->data) and delete membuf, matching the existing call sites.
MemBuffer* ParerUTestsUtils::MemBufferFromFile(const char *path) {
  char path_temp[PATH_MAX + 1] = {0x00};
  if ((path == nullptr) || (strlen(path) > PATH_MAX) || (realpath(path, path_temp) == nullptr)) {
    return nullptr;
  }
  // "rb", not "r+": model files are binary and reading must not require
  // write permission or text-mode translation.
  FILE *fp = fopen(path_temp, "rb");
  if (fp == nullptr) {
    return nullptr;
  }

  // Determine the file length via seek/tell.
  if (fseek(fp, 0, SEEK_END) != 0) {
    fclose(fp);
    return nullptr;
  }
  long file_length = ftell(fp);
  if (fseek(fp, 0, SEEK_SET) != 0) {
    fclose(fp);
    return nullptr;
  }
  if (file_length <= 0) {
    fclose(fp);
    return nullptr;
  }

  // Read directly into the buffer the MemBuffer will own; the previous
  // implementation staged through a second malloc + memcpy (and never
  // checked that second allocation), doubling peak memory for no benefit.
  void *data = malloc(static_cast<size_t>(file_length));
  if (data == nullptr) {
    fclose(fp);
    return nullptr;
  }
  size_t read_size = fread(data, 1, static_cast<size_t>(file_length), fp);
  fclose(fp);
  if (static_cast<long>(read_size) != file_length) {
    free(data);
    return nullptr;
  }

  // nothrow new so the null check below is meaningful (plain new throws).
  MemBuffer *membuf = new (std::nothrow) MemBuffer();
  if (membuf == nullptr) {
    free(data);
    return nullptr;
  }
  membuf->data = data;
  membuf->size = static_cast<uint32_t>(read_size);
  return membuf;
}

bool ParerUTestsUtils::ReadProtoFromText(const char *file, google::protobuf::Message *message) {
std::ifstream fs(file);
if (!fs.is_open()) {
return false;
}
google::protobuf::io::IstreamInputStream input(&fs);
bool ret = google::protobuf::TextFormat::Parse(&input, message);

fs.close();
return ret;
}

// Serializes |proto| in binary wire format and writes it to |filename|.
// The stream is opened in binary mode so bytes are written verbatim
// (text mode could translate line endings on some platforms and corrupt
// the generated .caffemodel fixture).
void ParerUTestsUtils::WriteProtoToBinaryFile(const google::protobuf::Message &proto, const char *filename) {
  const size_t size = proto.ByteSizeLong();
  char *buf = new char[size];
  if (!proto.SerializeToArray(buf, static_cast<int>(size))) {
    // On serialization failure the buffer holds garbage; write nothing.
    delete[] buf;
    return;
  }
  std::ofstream out(filename, std::ios::out | std::ios::binary);
  out.write(buf, size);
  out.close();
  delete[] buf;
}

namespace ut {
NodePtr GraphBuilder::AddNode(const std::string &name, const std::string &type, int in_cnt, int out_cnt, Format format,
DataType data_type, std::vector<int64_t> shape) {
@@ -67,68 +155,5 @@ void GraphBuilder::AddDataEdge(const NodePtr &src_node, int src_idx, const NodeP
void GraphBuilder::AddControlEdge(const NodePtr &src_node, const NodePtr &dst_node) {
GraphUtils::AddEdge(src_node->GetOutControlAnchor(), dst_node->GetInControlAnchor());
}

// Reads the whole file at |path| into a newly allocated MemBuffer.
// NOTE(review): this duplicates ParerUTestsUtils::MemBufferFromFile; it is
// kept because existing tests call it through namespace ut, but new code
// should prefer the ParerUTestsUtils version.
// Returns nullptr on failure; caller must free(membuf->data) and delete membuf.
ge::MemBuffer* MemBufferFromFile(const char *path) {
  char path_temp[PATH_MAX + 1] = {0x00};
  if ((path == nullptr) || (strlen(path) > PATH_MAX) || (realpath(path, path_temp) == nullptr)) {
    return nullptr;
  }
  // "rb", not "r+": reading a binary model must not require write access.
  FILE *fp = fopen(path_temp, "rb");
  if (fp == nullptr) {
    return nullptr;
  }

  // Determine the file length via seek/tell.
  if (fseek(fp, 0, SEEK_END) != 0) {
    fclose(fp);
    return nullptr;
  }
  long file_length = ftell(fp);
  if (fseek(fp, 0, SEEK_SET) != 0) {
    fclose(fp);
    return nullptr;
  }
  if (file_length <= 0) {
    fclose(fp);
    return nullptr;
  }

  // Single allocation owned by the MemBuffer; the original staged through a
  // second unchecked malloc + memcpy.
  void *data = malloc(static_cast<size_t>(file_length));
  if (data == nullptr) {
    fclose(fp);
    return nullptr;
  }
  size_t read_size = fread(data, 1, static_cast<size_t>(file_length), fp);
  fclose(fp);
  if (static_cast<long>(read_size) != file_length) {
    free(data);
    return nullptr;
  }

  // nothrow new keeps the null check meaningful.
  MemBuffer *membuf = new (std::nothrow) MemBuffer();
  if (membuf == nullptr) {
    free(data);
    return nullptr;
  }
  membuf->data = data;
  membuf->size = static_cast<uint32_t>(read_size);
  return membuf;
}

} // namespace ut
} // namespace ge

+ 3
- 0
tests/ut/parser/parser_ut_utils.h View File

@@ -30,7 +30,10 @@ class ParerUTestsUtils {
public:
static void ClearParserInnerCtx();
static MemBuffer* MemBufferFromFile(const char *path);
static bool ReadProtoFromText(const char *file, google::protobuf::Message *message);
static void WriteProtoToBinaryFile(const google::protobuf::Message &proto, const char *filename);
};

namespace ut {
class GraphBuilder {
public:


BIN
tests/ut/parser/testcase/caffe_parser_testcase/caffe_model/caffe_add.caffemodel View File


+ 36
- 0
tests/ut/parser/testcase/caffe_parser_testcase/caffe_model/caffe_add.caffemodel.txt View File

@@ -0,0 +1,36 @@
name: "TestAdd"
input: "data"
layer {
name: "data"
type: "Input"
top: "data"
input_param { shape: { dim: 3} }
}

layer {
name: "const"
type: "Input"
top: "const"
input_param { shape: { dim: 3} }
blobs {
data: 1
data: 2
data: 3
shape {
dim: 3
}
}
}

layer {
name: "reshape"
type: "Reshape"
bottom: "data"
bottom: "const"
top: "reshpae_out"
reshape_param {
shape {
dim: 3
}
}
}

+ 28
- 0
tests/ut/parser/testcase/caffe_parser_testcase/caffe_model/caffe_add.pbtxt View File

@@ -0,0 +1,28 @@
name: "TestAdd"
input: "data"
layer {
name: "data"
type: "Input"
top: "data"
input_param { shape: { dim: 3} }
}

layer {
name: "const"
type: "Input"
top: "const"
input_param { shape: { dim: 3} }
}

layer {
name: "reshape"
type: "Reshape"
bottom: "data"
bottom: "const"
top: "reshpae_out"
reshape_param {
shape {
dim: 3
}
}
}

+ 676
- 0
tests/ut/parser/testcase/caffe_parser_testcase/caffe_parser_unittest.cc View File

@@ -15,6 +15,9 @@
*/

#include <gtest/gtest.h>

#define protected public
#define private public
#include <iostream>
#include "parser/common/op_parser_factory.h"
#include "graph/operator_reg.h"
@@ -27,6 +30,19 @@
#include "ut/parser/parser_ut_utils.h"
#include "external/ge/ge_api_types.h"
#include "tests/depends/ops_stub/ops_stub.h"
#include "proto/caffe/caffe.pb.h"
#include "parser/caffe/caffe_parser.h"
#include "parser/caffe/caffe_data_parser.h"
#include "parser/caffe/caffe_op_parser.h"
#include "parser/caffe/caffe_custom_parser_adapter.h"
#include "parser/caffe/caffe_op_parser.h"
#include "graph/operator_reg.h"
#include "parser/common/acl_graph_parser_util.h"
#undef protected
#undef private

using namespace domi::caffe;
using namespace ge;

namespace ge {
class UtestCaffeParser : public testing::Test {
@@ -42,6 +58,100 @@ class UtestCaffeParser : public testing::Test {
void RegisterCustomOp();
};

// Wraps |opDesc| in a node so tests can exercise node-based parser APIs.
// Returns nullptr when |opDesc| is null.
// NOTE(review): the backing graph is a function-local static, so nodes from
// every call in this test binary accumulate in the same graph "g".
static ge::NodePtr GenNodeFromOpDesc(ge::OpDescPtr opDesc){
  if (!opDesc) {
    return nullptr;
  }
  static auto g = std::make_shared<ge::ComputeGraph>("g");
  return g->AddNode(std::move(opDesc));
}

// Builds a small fixture ComputeGraph:
//   Data1 -> Relu1 -> FullConnection -> Relu2 -> Mul -> {Mul1, Mul2}
// When with_leaf_node is true, an extra Relu3 hangs off FullConnection's
// second output, giving the graph a dangling leaf. Mul fans out via four
// output anchors (two into Mul1, two into Mul2); Mul1/Mul2 outputs are
// deliberately left unconnected.
ge::ComputeGraphPtr build_graph(bool with_leaf_node = false)
{
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");
  // Data1: 1 in / 1 out.
  ge::OpDescPtr data_op = std::make_shared<ge::OpDesc>();
  data_op->SetType(parser::DATA);
  data_op->SetName("Data1");
  data_op->AddInputDesc(ge::GeTensorDesc());
  data_op->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr data1 = graph->AddNode(data_op);

  // Relu1: 1 in / 1 out.
  ge::OpDescPtr relu_op1 = std::make_shared<ge::OpDesc>();
  relu_op1->SetType(parser::ACTIVATION);
  relu_op1->SetName("Relu1");
  relu_op1->AddInputDesc(ge::GeTensorDesc());
  relu_op1->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr relu1 = graph->AddNode(relu_op1);

  // Relu2: 1 in / 2 out (second output feeds Mul's second input).
  ge::OpDescPtr relu_op2 = std::make_shared<ge::OpDesc>();
  relu_op2->SetType(parser::RELU);
  relu_op2->SetName("Relu2");
  relu_op2->AddInputDesc(ge::GeTensorDesc());
  relu_op2->AddOutputDesc(ge::GeTensorDesc());
  relu_op2->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr relu2 = graph->AddNode(relu_op2);

  // Relu3: only added (and wired) when with_leaf_node is requested.
  ge::OpDescPtr relu_op3 = std::make_shared<ge::OpDesc>();
  relu_op3->SetType(parser::ACTIVATION);
  relu_op3->SetName("Relu3");
  relu_op3->AddInputDesc(ge::GeTensorDesc());
  relu_op3->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr relu3;
  if (with_leaf_node == true) {
    relu3 = graph->AddNode(relu_op3);
  }

  // Mul: 2 in / 4 out, the fan-out hub of the fixture.
  ge::OpDescPtr mul_op = std::make_shared<ge::OpDesc>();
  mul_op->SetType(parser::MUL);
  mul_op->SetName("Mul");
  mul_op->AddInputDesc(ge::GeTensorDesc());
  mul_op->AddInputDesc(ge::GeTensorDesc());
  mul_op->AddOutputDesc(ge::GeTensorDesc());
  mul_op->AddOutputDesc(ge::GeTensorDesc());
  mul_op->AddOutputDesc(ge::GeTensorDesc());
  mul_op->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr mul = graph->AddNode(mul_op);

  ge::OpDescPtr mul_op1 = std::make_shared<ge::OpDesc>();
  mul_op1->SetType(parser::MUL);
  mul_op1->SetName("Mul1");
  mul_op1->AddInputDesc(ge::GeTensorDesc());
  mul_op1->AddInputDesc(ge::GeTensorDesc());
  mul_op1->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr mul1 = graph->AddNode(mul_op1);

  ge::OpDescPtr mul_op2 = std::make_shared<ge::OpDesc>();
  mul_op2->SetType(parser::MUL);
  mul_op2->SetName("Mul2");
  mul_op2->AddInputDesc(ge::GeTensorDesc());
  mul_op2->AddInputDesc(ge::GeTensorDesc());
  mul_op2->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr mul2 = graph->AddNode(mul_op2);

  // FullConnection: 1 in / 2 out; second output is the optional leaf branch.
  ge::OpDescPtr fc_op = std::make_shared<ge::OpDesc>();
  fc_op->SetType(parser::FULL_CONNECTION);
  fc_op->SetName("FullConnection");
  fc_op->AddInputDesc(ge::GeTensorDesc());
  fc_op->AddOutputDesc(ge::GeTensorDesc());
  fc_op->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr fc = graph->AddNode(fc_op);

  // Wire the data edges described in the header comment.
  ge::GraphUtils::AddEdge(data1->GetOutDataAnchor(0), relu1->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(relu1->GetOutDataAnchor(0), fc->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(fc->GetOutDataAnchor(0), relu2->GetInDataAnchor(0));
  if (with_leaf_node == true) {
    ge::GraphUtils::AddEdge(fc->GetOutDataAnchor(1), relu3->GetInDataAnchor(0));
  }
  ge::GraphUtils::AddEdge(relu2->GetOutDataAnchor(0), mul->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(relu2->GetOutDataAnchor(1), mul->GetInDataAnchor(1));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(0), mul1->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(1), mul1->GetInDataAnchor(1));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(2), mul2->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(3), mul2->GetInDataAnchor(1));

  return graph;
}

void UtestCaffeParser::RegisterCustomOp() {
std::vector<OpRegistrationData> reg_datas = domi::OpRegistry::Instance()->registrationDatas;
for (auto reg_data : reg_datas) {
@@ -130,4 +240,570 @@ TEST_F(UtestCaffeParser, caffe_parser_user_output_with_default) {
EXPECT_EQ(net_out_name.at(0), "abs:0:abs_out");
}

// End-to-end aclgrphParseCaffe over the checked-in prototxt fixture. The
// binary weight file is regenerated from its text form at test time so the
// binary fixture cannot go stale. With these minimal fixtures both overloads
// are expected to fail with GRAPH_FAILED.
TEST_F(UtestCaffeParser, acl_caffe_parser) {
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string model_file = case_dir + "/caffe_model/caffe_add.pbtxt";
  std::string weight_file_txt = case_dir + "/caffe_model/caffe_add.caffemodel.txt";
  std::string weight_file = case_dir + "/caffe_model/caffe_add.caffemodel";

  // Rebuild the binary .caffemodel from its text-format twin.
  domi::caffe::NetParameter proto;
  EXPECT_EQ(ParerUTestsUtils::ReadProtoFromText(weight_file_txt.c_str(), &proto), true);
  ParerUTestsUtils::WriteProtoToBinaryFile(proto, weight_file.c_str());

  // Point the parser at the repo's caffe.proto definition.
  ge::GetParserContext().caffe_proto_path = case_dir + "/../../../../metadef/proto/caffe/caffe.proto";

  std::map<ge::AscendString, ge::AscendString> parser_params;
  ge::Graph graph;
  auto ret = ge::aclgrphParseCaffe(model_file.c_str(), weight_file.c_str(), parser_params, graph);
  EXPECT_EQ(ret, GRAPH_FAILED);
  ret = ge::aclgrphParseCaffe(model_file.c_str(), weight_file.c_str(), graph);
  EXPECT_EQ(ret, GRAPH_FAILED);
}

// Feeds the prototxt fixture to CaffeModelParser::ParseFromMemory via an
// in-memory buffer; with a null compute graph the parse is expected to
// return GRAPH_FAILED.
TEST_F(UtestCaffeParser, modelparser_parsefrommemory_success)
{
  std::string caseDir = __FILE__;
  std::size_t idx = caseDir.find_last_of("/");
  caseDir = caseDir.substr(0, idx);
  std::string modelFile = caseDir + "/caffe_model/caffe_add.pbtxt";

  const char* tmp_tf_pb_model = modelFile.c_str();
  printf("------------model_file:%s---------------------\n", tmp_tf_pb_model);
  ge::Graph graph;

  ge::ComputeGraphPtr compute_graph = ge::GraphUtils::GetComputeGraph(graph);
  CaffeModelParser modelParser;
  MemBuffer* memBuffer = ParerUTestsUtils::MemBufferFromFile(tmp_tf_pb_model);
  // Fail cleanly (instead of crashing on a null dereference) if the fixture
  // file is missing or unreadable.
  ASSERT_NE(memBuffer, nullptr);
  auto ret = modelParser.ParseFromMemory((char*)memBuffer->data, memBuffer->size, compute_graph);
  free(memBuffer->data);
  delete memBuffer;
  EXPECT_EQ(ret, GRAPH_FAILED);
}

// CaffeModelParser::ToJson: succeeds with a valid model/json path and fails
// when either argument is null.
// NOTE(review): the successful call leaves "tmp.json" in the working
// directory; it is not cleaned up.
TEST_F(UtestCaffeParser, caffe_parser_to_json) {
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string model_file = case_dir + "/caffe_model/caffe_add.pbtxt";
  std::map<ge::AscendString, ge::AscendString> parser_params;
  CaffeModelParser caffe_parser;

  const char *json_file = "tmp.json";
  auto ret = caffe_parser.ToJson(model_file.c_str(), json_file);
  EXPECT_EQ(ret, SUCCESS);

  // Null output path, then null input path as well.
  const char *json_null = nullptr;
  ret = caffe_parser.ToJson(model_file.c_str(), json_null);
  EXPECT_EQ(ret, FAILED);
  const char *model_null = nullptr;
  ret = caffe_parser.ToJson(model_null, json_null);
  EXPECT_EQ(ret, FAILED);
}

// CaffeDataParser::ParseParamsForDummyData / ParseParamsForInput: both fail
// on a bare layer; ParseParamsForDummyData still fails once a dummy_data
// param exists but has no shape, and succeeds after a shape entry is added.
TEST_F(UtestCaffeParser, caffe_parser_ParseParamsForDummyData_test)
{
  CaffeDataParser caffe_parser;
  domi::caffe::NetParameter net;
  ge::OpDescPtr op = std::make_shared<ge::OpDesc>("conv", "Convolution");
  domi::caffe::LayerParameter *lay = net.add_layer();
  Status ret = caffe_parser.ParseParamsForDummyData(lay, op);
  EXPECT_EQ(ret, FAILED);

  ret = caffe_parser.ParseParamsForInput(lay, op);
  EXPECT_EQ(ret, FAILED);

  domi::caffe::DummyDataParameter *dummyData = lay->mutable_dummy_data_param();
  ret = caffe_parser.ParseParamsForDummyData(lay, op);
  EXPECT_EQ(ret, FAILED);

  // Adding one shape entry (side effect only; the returned pointer was an
  // unused local named "dummpShape" in the original) makes the parse succeed.
  dummyData->add_shape();
  ret = caffe_parser.ParseParamsForDummyData(lay, op);
  EXPECT_EQ(ret, SUCCESS);
}

// CaffeOpParser::ConvertWeight succeeds for a blob carrying int8_data plus
// float data with a matching 1x2 shape.
TEST_F(UtestCaffeParser, convertWeights_success)
{
  CaffeOpParser parser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  // Stack allocation instead of raw new/delete: leak-safe if an assertion
  // aborts the test body early.
  domi::caffe::LayerParameter layer;
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->set_int8_data("12");
  blob->add_data(1);
  blob->add_data(1);

  domi::caffe::BlobShape *shap = blob->mutable_shape();
  shap->add_dim(1);
  shap->add_dim(2);

  Status ret = parser.ConvertWeight(*blob, "", weight);
  EXPECT_EQ(domi::SUCCESS, ret);
}

// CaffeCustomParserAdapter::ParseWeights succeeds for a layer with no blobs
// and again after a blob with data and a 1x2 shape is attached.
TEST_F(UtestCaffeParser, CaffeCustomParserAdapter_ParseWeights_success)
{
  CaffeCustomParserAdapter parserAdapter;
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);
  // Stack allocation instead of raw new/delete: leak-safe if the test body
  // exits early.
  LayerParameter layer;
  Status ret = parserAdapter.ParseWeights(&layer, node_tmp);
  EXPECT_EQ(ret, SUCCESS);

  BlobProto* blob = layer.add_blobs();
  blob->add_data(1);
  blob->add_data(1);
  BlobShape* shap = blob->mutable_shape();
  shap->add_dim(1);
  shap->add_dim(2);

  ret = parserAdapter.ParseWeights(&layer, node_tmp);
  EXPECT_EQ(ret, SUCCESS);
}

// CaffeCustomParserAdapter::ParseParams rejects an operator that carries no
// parseable attributes with PARAM_INVALID.
TEST_F(UtestCaffeParser, CaffeCustomParserAdapter_ParseParams_success)
{
  ge::OpDescPtr op_desc_src = std::make_shared<ge::OpDesc>("Data", "Input");
  ge::Operator op_src = ge::OpDescUtils::CreateOperatorFromOpDesc(op_desc_src);
  ge::OpDescPtr op_dest = std::make_shared<ge::OpDesc>("Data", "Input");

  CaffeCustomParserAdapter parserAdapter;
  Status ret = parserAdapter.ParseParams(op_src, op_dest);
  EXPECT_EQ(ret, PARAM_INVALID);
}

// CaffeDataParser::ParseParams fails for a DummyData-typed layer with no
// dummy-data param, and for a layer whose type string is not a data type.
TEST_F(UtestCaffeParser, CaffeDataParser_ParseParams_success)
{
  domi::caffe::NetParameter net;
  domi::caffe::LayerParameter* lay0 = net.add_layer();
  lay0->set_name("conv");
  lay0->set_type(ge::parser::DUMMY_DATA);

  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  CaffeDataParser parserAdapter;
  Status ret = parserAdapter.ParseParams(lay0, opDef);
  EXPECT_EQ(ret, FAILED);

  // A non-data type string is also rejected.
  lay0->set_type(ge::parser::ATTR_NAME_INPUT_TENSOR_DESC);
  ret = parserAdapter.ParseParams(lay0, opDef);
  EXPECT_EQ(ret, FAILED);
}

// CaffeWeightsParser::Parse: PARAM_INVALID on a null file path, PARAM_INVALID
// on a null graph, and FAILED once both arguments are present (the fixture
// weights do not match an empty graph).
TEST_F(UtestCaffeParser, CaffeWeightsParser_Parse_test)
{
  CaffeWeightsParser weightParser;
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string model_file = case_dir + "/caffe_model/caffe_add.caffemodel";
  const char *file = nullptr;
  ge::ComputeGraphPtr graph;
  Status ret = weightParser.Parse(file, graph);
  EXPECT_EQ(ret, PARAM_INVALID);

  file = model_file.c_str();
  ret = weightParser.Parse(file, graph);
  EXPECT_EQ(ret, PARAM_INVALID);

  graph = std::make_shared<ComputeGraph>("test");
  ret = weightParser.Parse(file, graph);
  EXPECT_EQ(ret, FAILED);
}

// CaffeWeightsParser::ParseWeightByFusionProto fails when given an invalid
// weight/proto combination.
// NOTE(review): weight_path points at the caffe.proto file, not at a weight
// file — looks intentional for the failure path, but confirm; the original
// also built an unused weight_file string, removed here.
TEST_F(UtestCaffeParser, CaffeWeightsParser_ParseWeightByFusionProto_test)
{
  CaffeWeightsParser weightParser;
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string model_file = case_dir + "/../../../../metadef/proto/caffe/caffe.proto";
  const char *weight_path = model_file.c_str();
  std::string fusion_proto_path = model_file;
  std::string fusion_proto_name = "caffe";
  ge::ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  Status ret = weightParser.ParseWeightByFusionProto(weight_path, fusion_proto_path, fusion_proto_name, graph);
  EXPECT_EQ(ret, FAILED);
}

// ParseFromMemory for both parsers: PARAM_INVALID on null data or null graph;
// with a graph present, 1 byte of non-protobuf data yields
// PARSE_WEIGHTS_FAILED for the weights parser and FAILED for the model parser.
TEST_F(UtestCaffeParser, CaffeWeightsParser_ParseFromMemory_test)
{
  CaffeWeightsParser weightParser;
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string weight_file = case_dir + "/caffe_model/caffe_add.caffemodel";
  ge::ComputeGraphPtr graph;
  const char *data = nullptr;
  Status ret = weightParser.ParseFromMemory(data, 1, graph);
  EXPECT_EQ(ret, PARAM_INVALID);

  // Note: "data" is the path string itself, not file contents — only its
  // first byte is handed to the parser, which is expected to fail.
  data = weight_file.c_str();
  ret = weightParser.ParseFromMemory(data, 1, graph);
  EXPECT_EQ(ret, PARAM_INVALID);

  graph = std::make_shared<ComputeGraph>("test");
  ret = weightParser.ParseFromMemory(data, 1, graph);
  EXPECT_EQ(ret, domi::PARSE_WEIGHTS_FAILED);

  CaffeModelParser model_parser;
  ret = model_parser.ParseFromMemory(data, 1, graph);
  EXPECT_EQ(ret, FAILED);
}

// CaffeModelParser::CreateCustomOperator fails for empty op name/type and
// succeeds once both are set; also touches AddOutputInfoToContext for
// coverage (no assertion on it).
TEST_F(UtestCaffeParser, CaffeWeightsParser_CreateCustomOperator_test)
{
  CaffeModelParser model_parser;

  vector<ge::Operator> operators;
  ge::OpDescPtr op_desc_src = std::make_shared<ge::OpDesc>("Data", "Input");
  ge::Operator op_src = ge::OpDescUtils::CreateOperatorFromOpDesc(op_desc_src);
  operators.emplace_back(op_src);
  std::string op_name = "";
  std::string op_type = "";
  domi::caffe::NetParameter net;
  domi::caffe::LayerParameter *lay0 = net.add_layer();
  lay0->set_name("Data");
  lay0->set_type("Input");
  Status ret = model_parser.CreateCustomOperator(op_name, op_type, &net, 1, operators);
  EXPECT_EQ(ret, FAILED);

  op_name = "Data";
  op_type = "Input";
  ret = model_parser.CreateCustomOperator(op_name, op_type, &net, 1, operators);
  EXPECT_EQ(ret, SUCCESS);

  model_parser.AddOutputInfoToContext(op_name, 1);
}

// CaffeModelParser::ParseOutputNodeTopInfo: succeeds when no user output
// nodes are configured, then returns PARAM_INVALID once out_nodes name
// tensors that do not exist in the net.
TEST_F(UtestCaffeParser, CaffeWeightsParser_ParseOutputNodeTopInfo_test)
{
  CaffeModelParser model_parser;
  AclGrphParseUtil acl_graph_parse_util;

  domi::caffe::NetParameter net;
  domi::caffe::LayerParameter *lay0 = net.add_layer();
  lay0->set_name("Data");
  lay0->set_type("Input");
  Status ret = model_parser.ParseOutputNodeTopInfo(net);
  EXPECT_EQ(ret, SUCCESS);

  // Configure out_nodes in the parser context, then re-parse.
  GetParserContext().type = domi::CAFFE;
  string graph_name;
  std::map<AscendString, AscendString> out_nodes_with_tensor_name1 = {
      {AscendString(ge::ir_option::OUT_NODES), AscendString("Out_tensor_1;Out_tensor_2")}};
  acl_graph_parse_util.ParseParamsBeforeGraph(out_nodes_with_tensor_name1, graph_name);
  ret = model_parser.ParseOutputNodeTopInfo(net);
  EXPECT_EQ(ret, PARAM_INVALID);
}

// ParseWeightType fails when the blob holds only int8_data and the declared
// shape/size do not match.
TEST_F(UtestCaffeParser, CaffeOpParser_ParseWeightType_test)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  // Stack allocation instead of raw new/delete: leak-safe on early exit.
  domi::caffe::LayerParameter layer;
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->set_int8_data("10");
  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 1, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// ParseWeightType with int32_data: succeeds when the expected element count
// matches (1) and fails when it does not (2).
TEST_F(UtestCaffeParser, CaffeOpParser_ParseWeightType_test2)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  // Stack allocation instead of raw new/delete: leak-safe on early exit.
  domi::caffe::LayerParameter layer;
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->add_int32_data(10);

  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 1, lay_name, weight);
  EXPECT_EQ(ret, SUCCESS);

  ret = opParser.ParseWeightType(*blob, shape, 2, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// ParseWeightType with double_data: succeeds for a matching element count (1)
// and fails for a mismatched one (3).
TEST_F(UtestCaffeParser, CaffeOpParser_ParseWeightType_test3)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  // Stack allocation instead of raw new/delete: leak-safe on early exit.
  domi::caffe::LayerParameter layer;
  domi::caffe::BlobProto *blob = layer.add_blobs();
  double value = 2.0;
  blob->add_double_data(value);

  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 1, lay_name, weight);
  EXPECT_EQ(ret, SUCCESS);

  ret = opParser.ParseWeightType(*blob, shape, 3, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// ParseWeightType with uint64_data: succeeds for a matching element count (1)
// and fails for a mismatched one (2).
TEST_F(UtestCaffeParser, CaffeOpParser_ParseWeightType_test4)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  // Stack allocation instead of raw new/delete: leak-safe on early exit.
  domi::caffe::LayerParameter layer;
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->add_uint64_data(10);

  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 1, lay_name, weight);
  EXPECT_EQ(ret, SUCCESS);

  ret = opParser.ParseWeightType(*blob, shape, 2, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// ParseWeightType fails when the expected element count (10) disagrees with
// the single float value stored in the blob.
TEST_F(UtestCaffeParser, CaffeOpParser_ParseWeightType_test5)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  // Stack allocation instead of raw new/delete: leak-safe on early exit.
  domi::caffe::LayerParameter layer;
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->add_data(10);

  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 10, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// CaffeOpParser::ConvertShape smoke test over the legacy num/channels/
// height/width blob layout (no assertion on the converted dims).
TEST_F(UtestCaffeParser, CaffeOpParser_ConvertShape_test)
{
  CaffeOpParser opParser;
  // Stack allocation instead of raw new/delete: leak-safe on early exit.
  domi::caffe::LayerParameter layer;
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->set_num(1);
  blob->set_channels(2);
  blob->set_height(1);
  blob->set_width(1);
  std::vector<int64_t> shape;

  opParser.ConvertShape(*blob, shape);
}

// CaffeModelParser::ParseInput: fails with an incomplete input_dim list (1 of
// 4 dims), succeeds once all 4 dims are present, and fails again when the
// net mixes input_dim with input_shape.
TEST_F(UtestCaffeParser, CaffeModelParser_ParseInput_test)
{
  CaffeModelParser modelParser;
  domi::caffe::NetParameter net;
  net.add_input("111");
  net.add_input_dim(1);
  bool input_data_flag = true;

  Status ret = modelParser.ParseInput(net, input_data_flag);
  EXPECT_EQ(ret, FAILED);

  // Complete the 4-element input_dim and add a layer carrying a weight blob.
  net.add_input_dim(2);
  net.add_input_dim(3);
  net.add_input_dim(4);
  domi::caffe::LayerParameter *lay0 = net.add_layer();
  BlobProto* blob = lay0->add_blobs();
  blob->add_data(1);
  blob->add_data(1);
  BlobShape* shap = blob->mutable_shape();
  shap->add_dim(1);
  shap->add_dim(2);
  ret = modelParser.ParseInput(net, input_data_flag);
  EXPECT_EQ(ret, SUCCESS);

  // input_shape alongside input_dim is rejected.
  net.add_input_shape();
  ret = modelParser.ParseInput(net, input_data_flag);
  EXPECT_EQ(ret, FAILED);
}

// CaffeModelParser::CustomProtoParse returns PARAM_INVALID when both proto
// paths point at a directory rather than proto files.
TEST_F(UtestCaffeParser, CaffeModelParser_CustomProtoParse_test)
{
  CaffeModelParser modelParser;
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string model_file = case_dir + "/caffe_model/";
  const char *model_path = model_file.c_str();

  // Both "proto" arguments are the directory path — deliberately invalid.
  std::string custom_proto = model_file;
  std::string caffe_proto = model_file;
  std::vector<ge::Operator> operators;
  ge::OpDescPtr op_desc_src = std::make_shared<ge::OpDesc>("Data", "Input");
  ge::Operator op_src = ge::OpDescUtils::CreateOperatorFromOpDesc(op_desc_src);
  operators.emplace_back(op_src);

  Status ret = modelParser.CustomProtoParse(model_path, custom_proto, caffe_proto, operators);
  EXPECT_EQ(ret, PARAM_INVALID);
}

// CaffeWeightsParser::Parse (ge::Graph overload) fails when the weight file
// does not match the empty target graph.
TEST_F(UtestCaffeParser, CaffeWeightsParser_ParseGraph_test)
{
  CaffeWeightsParser weightParser;
  ge::ComputeGraphPtr compute_graph = ge::parser::MakeShared<ge::ComputeGraph>("tmp_graph");
  ge::Graph graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph);

  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string weight_file = case_dir + "/caffe_model/caffe_add.caffemodel";
  const char *file = weight_file.c_str();

  Status ret = weightParser.Parse(file, graph);
  EXPECT_EQ(ret, FAILED);
}

// CaffeWeightsParser::ConvertNetParameter succeeds for a net holding a
// single Input layer with no weights.
TEST_F(UtestCaffeParser, CaffeWeightsParser_ConvertNetParameter_test)
{
  CaffeWeightsParser weightParser;
  domi::caffe::NetParameter net;

  ge::ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  domi::caffe::LayerParameter *lay0 = net.add_layer();
  lay0->set_name("Data");
  lay0->set_type("Input");

  Status ret = weightParser.ConvertNetParameter(net, graph);
  EXPECT_EQ(ret, SUCCESS);
}

// CaffeModelParser::IsOpAttrEmpty reports true for an attribute-less
// operator under both the "custom" and "built-in" type categories.
TEST_F(UtestCaffeParser, CaffeModelParser_IsOpAttrEmpty_test)
{
  CaffeModelParser model_parser;
  ge::OpDescPtr op_desc_src = std::make_shared<ge::OpDesc>("Data", "Input");
  ge::Operator op_src = ge::OpDescUtils::CreateOperatorFromOpDesc(op_desc_src);
  std::string type = "custom";

  bool ret = model_parser.IsOpAttrEmpty(op_src, type);
  EXPECT_EQ(ret, true);

  type = "built-in";
  ret = model_parser.IsOpAttrEmpty(op_src, type);
  EXPECT_EQ(ret, true);
}

// CaffeModelParser::GetCustomOp succeeds when the layer's name/type match a
// registered operator in the supplied list.
TEST_F(UtestCaffeParser, CaffeModelParser_GetCustomOp_test)
{
  CaffeModelParser model_parser;
  domi::caffe::NetParameter net;
  domi::caffe::LayerParameter *layer = net.add_layer();
  layer->set_name("Data");
  layer->set_type("Input");

  vector<ge::Operator> operators;
  ge::OpDescPtr op_desc_src = std::make_shared<ge::OpDesc>("Data", "Input");
  ge::Operator op_src = ge::OpDescUtils::CreateOperatorFromOpDesc(op_desc_src);
  operators.emplace_back(op_src);

  Status ret = model_parser.GetCustomOp(*layer, operators);
  EXPECT_EQ(ret, SUCCESS);
}

// CaffeModelParser::AddTensorDescToOpDesc succeeds for a layer with one
// bottom and no tops.
TEST_F(UtestCaffeParser, CaffeModelParser_AddTensorDescToOpDesc_test)
{
  CaffeModelParser model_parser;
  domi::caffe::NetParameter net;
  ge::OpDescPtr op_desc_src = std::make_shared<ge::OpDesc>("Abs", "AbsVal");
  domi::caffe::LayerParameter *layer = net.add_layer();
  layer->set_name("Abs");
  layer->set_type("AbsVal");
  layer->add_bottom("Abs");

  Status ret = model_parser.AddTensorDescToOpDesc(op_desc_src, *layer);
  EXPECT_EQ(ret, SUCCESS);
}

// CaffeWeightsParser::ConvertLayerParameter succeeds for an AbsVal layer
// even when the target graph contains no matching node.
TEST_F(UtestCaffeParser, CaffeWeightsParser_ConvertLayerParameter_test)
{
  CaffeWeightsParser weightParser;
  ge::ComputeGraphPtr compute_graph = ge::parser::MakeShared<ge::ComputeGraph>("tmp_graph");
  domi::caffe::NetParameter net;
  ge::OpDescPtr op_desc_src = std::make_shared<ge::OpDesc>("Abs", "AbsVal");
  domi::caffe::LayerParameter *layer = net.add_layer();
  layer->set_name("Abs");
  layer->set_type("AbsVal");

  Status ret = weightParser.ConvertLayerParameter(layer, compute_graph);
  EXPECT_EQ(ret, SUCCESS);
}

// CaffeWeightsParser::CheckLayersSize fails for a single weight-less layer.
TEST_F(UtestCaffeParser, CaffeWeightsParser_CheckLayersSize_test)
{
  CaffeWeightsParser weightParser;
  domi::caffe::NetParameter net;
  domi::caffe::LayerParameter *layer = net.add_layer();
  layer->set_name("Abs");
  layer->set_type("AbsVal");

  Status ret = weightParser.CheckLayersSize(layer);
  EXPECT_EQ(ret, FAILED);
}

// Drives the CaffeWeightsParser proto-conversion helpers (layer, blobs,
// blob shape, conv param, inner product) over a net that is used as both
// source and destination message; all are expected to succeed.
TEST_F(UtestCaffeParser, CaffeWeightsParser_ConvertLayerProto_test)
{
  CaffeWeightsParser weightParser;
  domi::caffe::NetParameter net;
  domi::caffe::LayerParameter *layer = net.add_layer();
  layer->set_name("Abs");
  layer->set_type("AbsVal");

  Status ret = weightParser.ConvertLayerProto(&net, &net);
  EXPECT_EQ(ret, SUCCESS);

  // Attach a blob with data and a 1x2 shape, then run the remaining
  // conversion helpers.
  BlobProto* blob = layer->add_blobs();
  blob->add_data(1);
  blob->add_data(1);
  BlobShape* shap = blob->mutable_shape();
  shap->add_dim(1);
  shap->add_dim(2);
  ret = weightParser.ConvertBlobsProto(&net, &net);
  EXPECT_EQ(ret, SUCCESS);

  ret = weightParser.ConvertBlobShapeProto(&net, &net);
  EXPECT_EQ(ret, SUCCESS);

  ret = weightParser.ConvertConvParamProto(&net, &net);
  EXPECT_EQ(ret, SUCCESS);

  ret = weightParser.ConvertInnerProdcutProto(&net, &net);
  EXPECT_EQ(ret, SUCCESS);
}

// CaffeWeightsParser::CheckNodes accepts the build_graph(true) fixture
// (which includes the extra Relu3 leaf branch).
TEST_F(UtestCaffeParser, CaffeWeightsParser_CheckNodes_test)
{
  CaffeWeightsParser weightParser;
  ge::ComputeGraphPtr compute_graph = build_graph(true);
  Status ret = weightParser.CheckNodes(compute_graph);
  EXPECT_EQ(ret, SUCCESS);
}

} // namespace ge

BIN
tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_clip_v9.onnx View File


+ 28
- 0
tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_clip_v9.py View File

@@ -0,0 +1,28 @@
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto


def make_clip_V9():
    """Build and save a one-node ONNX model using the opset-9 Clip op.

    In opset 9 Clip takes its bounds as the ``min``/``max`` attributes
    (later opsets moved them to inputs), which is exactly what the parser
    test needs to exercise.
    """
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 4, 5])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [3, 4, 5])
    clip_node = helper.make_node(
        'Clip',
        inputs=['X'],
        outputs=['Y'],
        max=1.0,
        min=-1.0,
    )
    graph = helper.make_graph([clip_node], "test_clip_case_V9", [x_info], [y_info])

    model = helper.make_model(graph, producer_name="onnx-mul_test")
    # Pin the model to opset 9 so the attribute-style Clip stays valid.
    model.opset_import[0].version = 9
    onnx.save(model, "./onnx_clip_v9.onnx")


if __name__ == '__main__':
    make_clip_V9()

BIN
tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_const_type.onnx View File


BIN
tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_conv2d.onnx View File


BIN
tests/ut/parser/testcase/onnx_parser_testcase/onnx_model/onnx_if.onnx View File


+ 116
- 0
tests/ut/parser/testcase/onnx_parser_testcase/onnx_parser_unittest.cc View File

@@ -25,6 +25,14 @@
#include "ut/parser/parser_ut_utils.h"
#include "external/ge/ge_api_types.h"
#include "tests/depends/ops_stub/ops_stub.h"
#include "parser/onnx/onnx_parser.h"

#define protected public
#define private public
#include "parser/onnx/onnx_constant_parser.h"
#include "parser/onnx/onnx_util.h"
#undef protected
#undef private

namespace ge {
class UtestOnnxParser : public testing::Test {
@@ -40,7 +48,59 @@ class UtestOnnxParser : public testing::Test {
void RegisterCustomOp();
};

// Stub parse-params callback used by the custom-op registrations in this
// test file; succeeds unconditionally and leaves op_dest untouched.
static Status ParseParams(const google::protobuf::Message* op_src, ge::Operator& op_dest) {
return SUCCESS;
}

// Stub operator-based parse callback for custom-op registration;
// succeeds unconditionally.
static Status ParseParamByOpFunc(const ge::Operator &op_src, ge::Operator& op_dest) {
return SUCCESS;
}

Status ParseSubgraphPostFnIf(const std::string& subgraph_name, const ge::Graph& graph) {
  // Post-process an If-op subgraph: remap its data inputs/outputs to parent
  // anchor indices through the ONNX-registered auto-mapping hook.
  const auto mapping_fn =
      domi::FrameworkRegistry::Instance().GetAutoMappingSubgraphIOIndexFunc(domi::ONNX);
  if (mapping_fn == nullptr) {
    std::cout << "auto mapping if subgraph func is nullptr!" << std::endl;
    return FAILED;
  }
  // Data input i of the subgraph maps to parent input i + 1
  // (slot 0 is taken by the If condition).
  const auto input_mapper = [](int data_index, int &parent_index) -> Status {
    parent_index = data_index + 1;
    return SUCCESS;
  };
  // Subgraph outputs map one-to-one onto parent outputs.
  const auto output_mapper = [](int output_index, int &parent_index) -> Status {
    parent_index = output_index;
    return SUCCESS;
  };
  return mapping_fn(graph, input_mapper, output_mapper);
}

void UtestOnnxParser::RegisterCustomOp() {
REGISTER_CUSTOM_OP("Conv2D")
.FrameworkType(domi::ONNX)
.OriginOpType("ai.onnx::11::Conv")
.ParseParamsFn(ParseParams);

// register if op info to GE
REGISTER_CUSTOM_OP("If")
.FrameworkType(domi::ONNX)
.OriginOpType({"ai.onnx::9::If",
"ai.onnx::10::If",
"ai.onnx::11::If",
"ai.onnx::12::If",
"ai.onnx::13::If"})
.ParseParamsFn(ParseParams)
.ParseParamsByOperatorFn(ParseParamByOpFunc)
.ParseSubgraphPostFn(ParseSubgraphPostFnIf);

REGISTER_CUSTOM_OP("Add")
.FrameworkType(domi::ONNX)
.OriginOpType("ai.onnx::11::Add")
.ParseParamsFn(ParseParams);

REGISTER_CUSTOM_OP("Identity")
.FrameworkType(domi::ONNX)
.OriginOpType("ai.onnx::11::Identity")
.ParseParamsFn(ParseParams);
std::vector<OpRegistrationData> reg_datas = domi::OpRegistry::Instance()->registrationDatas;
for (auto reg_data : reg_datas) {
OpRegistrationTbe::Instance()->Finalize(reg_data);
@@ -126,4 +186,60 @@ TEST_F(UtestOnnxParser, onnx_parser_user_output_with_tensor_failed) {
EXPECT_EQ(ret, FAILED);
}

TEST_F(UtestOnnxParser, onnx_parser_expand_one_to_many) {
  // Parse the opset-9 Clip model from file, then feed the same bytes through
  // the in-memory parser entry point.
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string model_file = case_dir + "/onnx_model/onnx_clip_v9.onnx";
  std::map<ge::AscendString, ge::AscendString> parser_params;
  ge::Graph graph;
  auto ret = ge::aclgrphParseONNX(model_file.c_str(), parser_params, graph);
  EXPECT_EQ(ret, GRAPH_SUCCESS);

  MemBuffer *buffer = ParerUTestsUtils::MemBufferFromFile(model_file.c_str());
  // Fix: bail out if the file read failed instead of dereferencing a null
  // buffer (which would crash the whole test binary rather than fail here).
  ASSERT_NE(buffer, nullptr);
  ret = ge::aclgrphParseONNXFromMem(reinterpret_cast<char *>(buffer->data), buffer->size, parser_params, graph);
  EXPECT_EQ(ret, GRAPH_SUCCESS);
}

TEST_F(UtestOnnxParser, onnx_parser_to_json) {
  // ToJson should succeed for a valid model/output pair and report FAILED
  // whenever either path argument is null.
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string model_file = case_dir + "/onnx_model/onnx_clip_v9.onnx";
  std::map<ge::AscendString, ge::AscendString> parser_params;
  OnnxModelParser onnx_parser;

  const char *json_file = "tmp.json";
  EXPECT_EQ(onnx_parser.ToJson(model_file.c_str(), json_file), SUCCESS);

  // Null output path.
  const char *json_null = nullptr;
  EXPECT_EQ(onnx_parser.ToJson(model_file.c_str(), json_null), FAILED);

  // Null model path as well.
  const char *model_null = nullptr;
  EXPECT_EQ(onnx_parser.ToJson(model_null, json_null), FAILED);
}

TEST_F(UtestOnnxParser, onnx_parser_const_data_type) {
  // Parsing the const-type sample model end-to-end is expected to succeed.
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string model_file = case_dir + "/onnx_model/onnx_const_type.onnx";
  std::map<ge::AscendString, ge::AscendString> parser_params;
  ge::Graph graph;
  const auto parse_ret = ge::aclgrphParseONNX(model_file.c_str(), parser_params, graph);
  EXPECT_EQ(parse_ret, GRAPH_SUCCESS);
}

// Exercises the ONNX -> GE data-type conversion for a known and an
// out-of-range type code.
TEST_F(UtestOnnxParser, OnnxModelParser_ConvertToGeDataType_test)
{
OnnxModelParser model_parser;
uint32_t type = OnnxDataType::FLOAT;

// NOTE(review): the result is stored in Status and compared with SUCCESS,
// while the second call compares against ge::DataType::DT_UNDEFINED —
// presumably ConvertToGeDataType returns a ge::DataType and DT_FLOAT
// happens to equal SUCCESS (0); confirm the intended comparison.
Status ret = model_parser.ConvertToGeDataType(type);
EXPECT_EQ(ret, SUCCESS);

// 20 is not a valid OnnxDataType code; conversion is expected to yield
// DT_UNDEFINED.
type = 20;
ret = model_parser.ConvertToGeDataType(type);
EXPECT_EQ(ret, ge::DataType::DT_UNDEFINED);
}

} // namespace ge

+ 392
- 26
tests/ut/parser/testcase/tensorflow_parser_testcase/tensorflow_parser_unittest.cc View File

@@ -71,6 +71,11 @@
#include "parser/common/parser_fp16_t.h"
#include "parser/common/op_parser_factory.h"
#include "parser/common/prototype_pass_manager.h"
#include "parser/common/register_tbe.h"
#include "parser/common/pass_manager.h"
#include "parser/tensorflow/graph_optimizer.h"
#include "metadef/inc/register/scope/scope_pass_registry_impl.h"
#include "register/scope/scope_fusion_pass_register.h"
#undef protected
#undef private

@@ -110,6 +115,14 @@ public:
}
};

// Graph pass stub whose Run always fails; used to drive the PassManager
// error path in the graph_pass_error test below.
class ErrorGraphPass: public GraphPass
{
// Always reports failure regardless of the input graph.
Status Run(ComputeGraphPtr graph)
{
return domi::FAILED;
}
};

class ScopeTestPass : public ScopeBasePass {
protected:
vector<ScopeFusionPatterns> DefinePatterns() {
@@ -206,7 +219,6 @@ namespace {
nodeDef->set_op("Const");
::google::protobuf::Map<std::string, tensorflow::AttrValue >* node_attr_map = nodeDef->mutable_attr();

// Set the T attribute
domi::tensorflow::AttrValue t_attr_value;
t_attr_value.set_type(domi::tensorflow::DT_INT32);
(*node_attr_map)[TENSORFLOW_ATTR_T] = t_attr_value;
@@ -221,7 +233,7 @@ namespace {
list->add_s("MatMul");
(*node_attr_map)[TENSORFLOW_ATTR_OUTPUT_OP] = outputs_attr_value;

// ¨¦¨¨?? tensor ¨º?D?
// Set the tensor attribute
domi::tensorflow::AttrValue value_attr_value;
tensorflow::TensorProto* tensor = value_attr_value.mutable_tensor();
tensorflow::TensorShapeProto* tensor_shape = tensor->mutable_tensor_shape();
@@ -246,7 +258,6 @@ namespace {
nodeDef->set_op("VariableV2");
google::protobuf::Map<std::string, tensorflow::AttrValue > *node_attr_map = nodeDef->mutable_attr();

// Set the data_format attribute
domi::tensorflow::AttrValue format_attr_value;
format_attr_value.set_s("_FZ");
(*node_attr_map)[VAR_ATTR_FORMAT] = format_attr_value;
@@ -276,14 +287,13 @@ namespace {
shape.mutable_list()->add_i((int64)14);
shape.mutable_list()->add_i((int64)14);

// Set the data_format attribute
domi::tensorflow::AttrValue df_attr_value;
domi::tensorflow::AttrValue df_attr_value2;
df_attr_value2.set_s(TENSORFLOWF_TENSOR_NHWC);

df_attr_value.set_i((int64_t)ccTensorFormat_t::CC_TENSOR_NHWC);
(*node_attr_map)[TENSORFLOW_ATTR_DATA_FORMAT] = df_attr_value2;
// Set the padding attribute
domi::tensorflow::AttrValue pad_attr_value;
domi::tensorflow::AttrValue pad_attr_value2;
pad_attr_value2.set_s(TENSORFLOWF_OP_PADDING_SAME);
@@ -306,17 +316,14 @@ namespace {
nodeDef->set_op("TemporaryVariable");
google::protobuf::Map<std::string, tensorflow::AttrValue> *node_attr_map = nodeDef->mutable_attr();

// Set the dtype attribute
domi::tensorflow::AttrValue type_attr;
type_attr.set_type(domi::tensorflow::DT_FLOAT);
(*node_attr_map)[VAR_ATTR_DTYPE] = type_attr;

// Set the var_name attribute
domi::tensorflow::AttrValue var_name_attr_value;
var_name_attr_value.set_s("temporary_variable_name");
(*node_attr_map)[ge::VAR_ATTR_NAME] = var_name_attr_value;

// Set the shape attribute
domi::tensorflow::AttrValue shape_attr_value;
shape_attr_value.mutable_shape()->add_dim()->set_size(1);
shape_attr_value.mutable_shape()->add_dim()->set_size(2);
@@ -330,14 +337,12 @@ namespace {
shape.mutable_list()->add_i((int64)14);
shape.mutable_list()->add_i((int64)14);

// Set the data_format attribute
domi::tensorflow::AttrValue df_attr_value2;
df_attr_value2.set_s(TENSORFLOWF_TENSOR_NHWC);
(*node_attr_map)[TENSORFLOW_ATTR_DATA_FORMAT] = df_attr_value2;
domi::tensorflow::AttrValue df_attr_value;
df_attr_value.set_i((int64_t)ccTensorFormat_t::CC_TENSOR_NHWC);

// Set the padding attribute
domi::tensorflow::AttrValue pad_attr_value2;
pad_attr_value2.set_s(TENSORFLOWF_OP_PADDING_SAME);
(*node_attr_map)[TENSORFLOW_ATTR_PADDING] = pad_attr_value2;
@@ -359,7 +364,6 @@ namespace {
NodeDef *nodeDef = new NodeDef();
google::protobuf::Map<std::string, tensorflow::AttrValue> *node_attr_map = nodeDef->mutable_attr();

// Set the type attribute
domi::tensorflow::AttrValue dtype_attr_value ;

if (index == 0) {
@@ -370,12 +374,11 @@ namespace {
dtype_attr_value.set_type(tensorflow::DT_HALF);
}
(*node_attr_map)[ge::TENSORFLOW_ATTR_DTYPE] = dtype_attr_value;
// Set the data_format attribute
domi::tensorflow::AttrValue df_attr_value;
df_attr_value.set_s(TENSORFLOWF_TENSOR_NCHW);
(*node_attr_map)[TENSORFLOW_ATTR_DATA_FORMAT] = df_attr_value;

// Set the tensor attribute
domi::tensorflow::AttrValue value_attr_value;
::tensorflow::TensorProto* tensor = value_attr_value.mutable_tensor();
::tensorflow::TensorShapeProto* tensor_shape = tensor->mutable_tensor_shape();
@@ -585,12 +588,11 @@ namespace {
NodeDef *nodeDef = new NodeDef();
google::protobuf::Map<std::string, tensorflow::AttrValue> *node_attr_map = nodeDef->mutable_attr();

// Set the T attribute
domi::tensorflow::AttrValue dtype_attr_value ;
dtype_attr_value.set_type(domi::tensorflow::DT_FLOAT);
(*node_attr_map)[TENSORFLOW_ATTR_T] = dtype_attr_value;

// Set the strides attribute
// Set the strides attribute
domi::tensorflow::AttrValue axis_attr_value;
::tensorflow::AttrValue_ListValue* list = axis_attr_value.mutable_list();
list->add_i(1);
@@ -605,12 +607,12 @@ namespace {
NodeDef *nodeDef = new NodeDef();
::google::protobuf::Map<std::string, tensorflow::AttrValue > *node_attr_map = nodeDef->mutable_attr();

//¨¦¨¨??T¨º?D?
// Set the T attribute
domi::tensorflow::AttrValue dtype_attr_value ;
dtype_attr_value.set_type(domi::tensorflow::DT_FLOAT);
(*node_attr_map)[TENSORFLOW_ATTR_T] = dtype_attr_value;

//¨¦¨¨??strides¨º?D?
// Set the strides attribute
domi::tensorflow::AttrValue axis_attr_value;
::tensorflow::AttrValue_ListValue* list = axis_attr_value.mutable_list();
list->add_i(1);
@@ -740,6 +742,92 @@ namespace {
node_def->add_input(input);
return node_def;
}

ge::ComputeGraphPtr build_graph(bool with_leaf_node = false)
{
  // Builds the shared test topology:
  //   Data1 -> Relu1 -> FullConnection -> Relu2 -> Mul -> {Mul1, Mul2}
  // plus, when with_leaf_node is true, a dangling leaf FullConnection -> Relu3.
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");

  // Helper: make an OpDesc of the given type/name with plain tensor
  // descriptors for each input/output slot.
  auto make_op = [](const std::string &type, const std::string &name,
                    int input_num, int output_num) -> ge::OpDescPtr {
    ge::OpDescPtr op = std::make_shared<ge::OpDesc>();
    op->SetType(type);
    op->SetName(name);
    for (int i = 0; i < input_num; ++i) {
      op->AddInputDesc(ge::GeTensorDesc());
    }
    for (int i = 0; i < output_num; ++i) {
      op->AddOutputDesc(ge::GeTensorDesc());
    }
    return op;
  };

  // Node creation order matches the original fixture exactly.
  ge::NodePtr data1 = graph->AddNode(make_op(parser::DATA, "Data1", 1, 1));
  ge::NodePtr relu1 = graph->AddNode(make_op(parser::ACTIVATION, "Relu1", 1, 1));
  ge::NodePtr relu2 = graph->AddNode(make_op(parser::RELU, "Relu2", 1, 2));
  ge::NodePtr relu3;
  if (with_leaf_node == true) {
    relu3 = graph->AddNode(make_op(parser::ACTIVATION, "Relu3", 1, 1));
  }
  ge::NodePtr mul = graph->AddNode(make_op(parser::MUL, "Mul", 2, 4));
  ge::NodePtr mul1 = graph->AddNode(make_op(parser::MUL, "Mul1", 2, 1));
  ge::NodePtr mul2 = graph->AddNode(make_op(parser::MUL, "Mul2", 2, 1));
  ge::NodePtr fc = graph->AddNode(make_op(parser::FULL_CONNECTION, "FullConnection", 1, 2));

  ge::GraphUtils::AddEdge(data1->GetOutDataAnchor(0), relu1->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(relu1->GetOutDataAnchor(0), fc->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(fc->GetOutDataAnchor(0), relu2->GetInDataAnchor(0));
  if (with_leaf_node == true) {
    ge::GraphUtils::AddEdge(fc->GetOutDataAnchor(1), relu3->GetInDataAnchor(0));
  }
  ge::GraphUtils::AddEdge(relu2->GetOutDataAnchor(0), mul->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(relu2->GetOutDataAnchor(1), mul->GetInDataAnchor(1));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(0), mul1->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(1), mul1->GetInDataAnchor(1));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(2), mul2->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(3), mul2->GetInDataAnchor(1));
  return graph;
}

}

namespace {
@@ -1280,7 +1368,7 @@ TEST_F(UtestTensorflowParser, tensorflow_ParserProto_failed)
ret = tensorflow_parser.ParseProto(reinterpret_cast<google::protobuf::Message *>(&graphDef), root_graph);
EXPECT_EQ(PARAM_INVALID, ret);

// proto parsing fails
// proto parsing fails
bool protoRet = parser::ReadProtoFromText(root_proto.c_str(), &graphDef);
ASSERT_EQ(protoRet, false);
ret = tensorflow_parser.ParseProto(reinterpret_cast<google::protobuf::Message *>(&graphDef), root_graph);
@@ -1713,7 +1801,7 @@ TEST_F(UtestTensorflowParser, tensorflow_squeeze_test) {
domi::tensorflow::AttrValue dtype_attr_value ;
dtype_attr_value.set_type(domi::tensorflow::DT_FLOAT);
(*node_attr_map)[TENSORFLOW_ATTR_T] = dtype_attr_value;
//¨¦¨¨??strides¨º?D?
// Set the strides attribute
tensorflow::AttrValue axis_attr_value;
tensorflow::AttrValue_ListValue *list = axis_attr_value.mutable_list();
list->add_i(1);
@@ -2036,7 +2124,7 @@ TEST_F(UtestTensorflowParser, tensorflow_CheckOpShapeDim_test)
dtype_attr_value.set_type(domi::tensorflow::DT_FLOAT);
(*node_attr_map)[TENSORFLOW_ATTR_T] = dtype_attr_value;

//¨¦¨¨??strides¨º?D?
// Set the strides attribute
domi::tensorflow::AttrValue axis_attr_value;
::tensorflow::AttrValue_ListValue* list = axis_attr_value.mutable_list();
list->add_i(1);
@@ -2162,7 +2250,7 @@ TEST_F(UtestTensorflowParser, tensorflow_arg_parser_test)
dtype_attr_value.set_type(domi::tensorflow::DT_FLOAT);
(*node_attr_map)[TENSORFLOW_ATTR_T] = dtype_attr_value;

// Set the strides attribute
//����??strides��?D?
domi::tensorflow::AttrValue axis_attr_value;
::tensorflow::AttrValue_ListValue* list = axis_attr_value.mutable_list();
list->add_i(1);
@@ -2239,7 +2327,7 @@ TEST_F(UtestTensorflowParser, tensorflow_frameworkop_parser_test2)
dtype_attr_value.set_type(domi::tensorflow::DT_FLOAT);
(*node_attr_map)[TENSORFLOW_ATTR_T] = dtype_attr_value;

//¨¦¨¨??strides¨º?D?
// Set the strides attribute
domi::tensorflow::AttrValue axis_attr_value;
::tensorflow::AttrValue_ListValue* list = axis_attr_value.mutable_list();
list->add_i(1);
@@ -2305,7 +2393,7 @@ TEST_F(UtestTensorflowParser, tensorflow_reshape_parser_test)
domi::tensorflow::AttrValue df_attr_value;
df_attr_value.set_i((int64_t)ccTensorFormat_t::CC_TENSOR_NHWC);

//¨¦¨¨??padding¨º?D?
// Set the padding attribute
domi::tensorflow::AttrValue pad_attr_value2;
pad_attr_value2.set_s(TENSORFLOWF_OP_PADDING_SAME);
(*node_attr_map)[TENSORFLOW_ATTR_PADDING] = pad_attr_value2;
@@ -2345,7 +2433,7 @@ TEST_F(UtestTensorflowParser, tensorflow_DefunToPartitionedCall_parser_test)
dtype_attr_value.set_type(domi::tensorflow::DT_FLOAT);
(*node_attr_map)[TENSORFLOW_ATTR_T] = dtype_attr_value;

//¨¦¨¨??strides¨º?D?
// Set the strides attribute
domi::tensorflow::AttrValue axis_attr_value;
::tensorflow::AttrValue_ListValue* list = axis_attr_value.mutable_list();
list->add_i(1);
@@ -2477,7 +2565,7 @@ TEST_F(UtestTensorflowParser, Tensorflow_UpdateFusionOpContext_test)
ge::OpNodeContext normal_op_node_context;
ge::OpNodeContext fusion_op_node_context;

/* 1.?¡è??¨¬??t */
/* 1. Prepare test inputs */
tensorflow::GraphDef *graph = new tensorflow::GraphDef();
ScopePassManager passmanager;
shared_ptr<ScopeGraph> scope_graph = passmanager.BuildScopeGraph(graph);
@@ -3851,7 +3939,7 @@ TEST_F(UtestTensorflowParser, tensorflow_FP16_parser_test)
fp16.operator=(ui16_val);
ui16_val = 0;
fp16.operator=(ui16_val);
ui16_val = 100000;
ui16_val = 1;
fp16.operator=(ui16_val);

int32_t i32_val = 0;
@@ -3865,4 +3953,282 @@ TEST_F(UtestTensorflowParser, tensorflow_FP16_parser_test)
fp16.operator=(ui32_val);
}

TEST_F(UtestTensorflowParser, tensorflow_AclParserInitialize_test)
{
  // Initialization fails without a framework type and succeeds once one is set.
  AclGrphParseUtil parseUtil;
  std::map<std::string, std::string> options;
  EXPECT_EQ(parseUtil.AclParserInitialize(options), FAILED);

  options[ge::FRAMEWORK_TYPE] = "2";
  EXPECT_EQ(parseUtil.AclParserInitialize(options), SUCCESS);
}

TEST_F(UtestTensorflowParser, tensorflow_GetOutputLeaf_test)
{
  // GetOutputLeaf on a freshly added NETOUTPUT node, with an output list that
  // already holds another node, is expected to report FAILED.
  // Fix: dropped the local ComputeGraph "graph" that was constructed but
  // never used by the test.
  AclGrphParseUtil parseUtil;
  ge::ComputeGraphPtr compute_graph = build_graph(true);
  ge::NodePtr output_nodes_info = compute_graph->FindNode("Relu3");
  std::vector<std::pair<ge::NodePtr, int32_t>> output_nodes = {{output_nodes_info,0}};
  ge::NodePtr node = AddNode(compute_graph, "K", parser::NETOUTPUT,1,1);
  Status ret = parseUtil.GetOutputLeaf(node, output_nodes);
  EXPECT_EQ(ret, FAILED);
}

TEST_F(UtestTensorflowParser, graph_pass_error)
{
  // A pass that returns FAILED must make PassManager::Run fail as a whole.
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  ErrorGraphPass failing_pass;
  ge::parser::PassManager passManager;
  std::vector<std::pair<string, GraphPass*>> passes;
  passes.emplace_back("", &failing_pass);
  EXPECT_EQ(domi::FAILED, passManager.Run(graph, passes));
}

// FindFmkNodeCluser should succeed on a cluster map built from a single
// framework-op node.
TEST_F(UtestTensorflowParser, parser_FindFmkNodeCluser_success)
{
ComputeGraphPtr graph = std::make_shared<ComputeGraph>("FrameworkOp");
ParserGraphOptimizer graphOptimizer(graph, domi::TENSORFLOW);
ge::NodePtr node = AddNode(graph, "K", parser::FRAMEWORK_OP_TYPE, 1, 1);
// NOTE(review): "Relu3" was never added to this graph, so this lookup
// presumably returns a null NodePtr and the cluster below mixes a valid
// node with a null entry — confirm FindFmkNodeCluser tolerates that.
ge::NodePtr output_nodes_info = graph->FindNode("Relu3");
std::unordered_map<string, vector<NodePtr>> node_cluser_Map({
{"x", {node, output_nodes_info}},
});
Status ret = graphOptimizer.FindFmkNodeCluser(node_cluser_Map);
EXPECT_EQ(ret, SUCCESS);
}

// Rebuilds fusion-op input/output anchors from the DAG fixture and checks
// both rebuild calls succeed.
TEST_F(UtestTensorflowParser, parser_RebuildOutputAnchors_test)
{
ge::ComputeGraphPtr subGraph = std::make_shared<ge::ComputeGraph>("default");
ParserGraphOptimizer graphOptimizer(subGraph, domi::TENSORFLOW);
string inputNodeType = "DATA";
MakeDagGraph(subGraph, inputNodeType);

// Harvest the graph's boundary anchors: inputs fed by DATA nodes and
// outputs consumed by NETOUTPUT nodes.
vector<ge::InDataAnchorPtr> in_anchor;
vector<ge::OutDataAnchorPtr> out_anchor;
for(ge::NodePtr node : subGraph->GetAllNodes()) {
for(auto out : node->GetAllOutDataAnchors()) {
for(auto in : node->GetAllInDataAnchors()) {
if(in->GetPeerOutAnchor() != nullptr && in->GetPeerOutAnchor()->GetOwnerNode()->GetOpDesc()->GetType() == parser::DATA) {
in_anchor.push_back(in);
}
}
for(auto i : out->GetPeerInDataAnchors()) {
if(i->GetOwnerNode()->GetOpDesc()->GetType() == parser::NETOUTPUT) {
out_anchor.push_back(out);
}
}
}
}
// Attach the harvested anchors to a fresh fusion op description.
OpDescPtr fusion_op_desc = make_shared<ge::OpDesc>("FusionCustom", ge::parser::CONSTANT);
Status ret = graphOptimizer.RebuildOutputAnchors(out_anchor, fusion_op_desc);
EXPECT_EQ(domi::SUCCESS, ret);

ret = graphOptimizer.RebuildInputAnchors(in_anchor, fusion_op_desc);
EXPECT_EQ(domi::SUCCESS, ret);
}

TEST_F(UtestTensorflowParser, parser_LinkInnerAnchor_test)
{
  // Smoke test: LinkInnerAnchor walks a two-node name map without crashing.
  ge::ComputeGraphPtr subGraph = std::make_shared<ge::ComputeGraph>("default");
  NodePtr node_a = AddNode(subGraph, "A", parser::NETOUTPUT, 1, 1);
  NodePtr node_b = AddNode(subGraph, "B", parser::NETOUTPUT, 1, 1);
  unordered_map<string, ge::NodePtr> node_map;
  node_map.emplace("A", node_a);
  node_map.emplace("B", node_b);

  ParserGraphOptimizer graphOptimizer(subGraph, domi::TENSORFLOW);
  graphOptimizer.LinkInnerAnchor(node_map);
}

// MarkForFusion on a cluster containing a framework-op node is expected to
// report INTERNAL_ERROR for this minimal graph.
TEST_F(UtestTensorflowParser, parser_MarkForFusion_test)
{
ge::ComputeGraphPtr subGraph = std::make_shared<ge::ComputeGraph>("default");
ParserGraphOptimizer graphOptimizer(subGraph, domi::TENSORFLOW);
ge::NodePtr node = AddNode(subGraph, "K", parser::FRAMEWORK_OP_TYPE, 1, 1);
// NOTE(review): "Relu3" is not in this graph, so the lookup presumably
// yields a null NodePtr in the cluster — confirm this is intentional.
ge::NodePtr output_nodes_info = subGraph->FindNode("Relu3");
std::unordered_map<string, vector<NodePtr>> node_cluser_Map({
{"x", {node, output_nodes_info}},
});
Status ret = graphOptimizer.MarkForFusion(node_cluser_Map);
EXPECT_EQ(ret, INTERNAL_ERROR);
}

TEST_F(UtestTensorflowParser, parser_UpdateGraph_test)
{
  // UpdateGraph over two stand-alone NETOUTPUT nodes reports PARAM_INVALID.
  ge::ComputeGraphPtr subGraph = std::make_shared<ge::ComputeGraph>("default");
  ParserGraphOptimizer graphOptimizer(subGraph, domi::TENSORFLOW);
  std::vector<NodePtr> nodes;
  nodes.push_back(AddNode(subGraph, "A", parser::NETOUTPUT, 1, 1));
  nodes.push_back(AddNode(subGraph, "B", parser::NETOUTPUT, 1, 1));
  EXPECT_EQ(graphOptimizer.UpdateGraph(nodes), PARAM_INVALID);
}

// RebuildFusionNode with boundary anchors harvested from the DAG fixture but
// empty control/output maps is expected to report FAILED.
TEST_F(UtestTensorflowParser, parser_RebuildFusionNode_test)
{
ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>(GRAPH_DEFAULT_NAME);
ParserGraphOptimizer graphOptimizer(graph, domi::TENSORFLOW);
string inputNodeType = "DATA";
MakeDagGraph(graph, inputNodeType);
// Collect inputs fed by DATA nodes and outputs consumed by NETOUTPUT nodes.
vector<ge::InDataAnchorPtr> input_anchors;
vector<ge::OutDataAnchorPtr> output_anchors;
for(ge::NodePtr node : graph->GetAllNodes()) {
for(auto out : node->GetAllOutDataAnchors()) {
for(auto in : node->GetAllInDataAnchors()) {
if(in->GetPeerOutAnchor() != nullptr && in->GetPeerOutAnchor()->GetOwnerNode()->GetOpDesc()->GetType() == parser::DATA) {
input_anchors.push_back(in);
}
}
for(auto i : out->GetPeerInDataAnchors()) {
if(i->GetOwnerNode()->GetOpDesc()->GetType() == parser::NETOUTPUT) {
output_anchors.push_back(out);
}
}
}
}
// Left intentionally empty: the failure path under test.
map<ge::OutDataAnchorPtr, vector<ge::InDataAnchorPtr>> output_in_map;
vector<ge::InControlAnchorPtr> input_control_anchors;
vector<ge::OutControlAnchorPtr> output_control_anchors;

// A detached FrameworkOp node serves as the fusion target.
ge::OpDescPtr op = std::make_shared<ge::OpDesc>("dpop_123", "FrameworkOp");
ge::NodePtr fusion_node = std::make_shared<ge::Node>(op, graph);
Status ret = graphOptimizer.RebuildFusionNode(input_anchors, output_anchors, output_in_map, input_control_anchors, output_control_anchors, fusion_node);
EXPECT_EQ(ret, FAILED);
}

TEST_F(UtestTensorflowParser, parser_InsertNode_test)
{
  // InsertNode with empty anchor collections is expected to report
  // PARAM_INVALID.
  ge::ComputeGraphPtr subGraph = std::make_shared<ge::ComputeGraph>("default");
  ParserGraphOptimizer graphOptimizer(subGraph, domi::TENSORFLOW);
  auto merge_node = AddNode(subGraph, "Merge", parser::MERGE, 1, 2);
  auto relu_node = AddNode(subGraph, "Op1", parser::RELU, 1, 1);
  auto conv_node_a = AddNode(subGraph, "Op2", parser::CONVOLUTION, 1, 1);
  auto conv_node_b = AddNode(subGraph, "Op3", parser::CONVOLUTION, 1, 1);
  std::vector<NodePtr> nodes{merge_node, relu_node, conv_node_a, conv_node_b};

  // All anchor inputs stay empty on purpose.
  vector<ge::InDataAnchorPtr> in_anchor;
  vector<ge::OutDataAnchorPtr> out_anchor;
  map<ge::OutDataAnchorPtr, vector<ge::InDataAnchorPtr>> output_in_map;
  vector<ge::InControlAnchorPtr> input_control_anchors;
  vector<ge::OutControlAnchorPtr> output_control_anchors;
  unordered_map<string, ge::NodePtr> node_map;
  node_map.emplace("A", merge_node);
  node_map.emplace("B", relu_node);
  node_map.emplace("C", conv_node_a);
  node_map.emplace("D", conv_node_b);

  Status ret = graphOptimizer.InsertNode(subGraph, nodes, in_anchor, out_anchor, output_in_map,
                                         input_control_anchors, output_control_anchors, node_map);
  EXPECT_EQ(ret, PARAM_INVALID);
}

TEST_F(UtestTensorflowParser, parser_GeStoi_test)
{
  // "dynamic_rnn" is not a numeric index string, so GeStoi is expected to
  // report INTERNAL_ERROR and leave the index untouched.
  TensorFlowModelParser model_parser;
  string input_node_name = "dynamic_rnn_node1";
  string index_str = "dynamic_rnn";
  int32_t parsed_index = 0;
  EXPECT_EQ(model_parser.GeStoi(input_node_name, index_str, &parsed_index), INTERNAL_ERROR);
}

// ConstOpNeedUpdate is checked for both an Identity input node and a Const
// node registered in the parser's nodedef map.
TEST_F(UtestTensorflowParser, parser_ConstOpNeedUpdate_test)
{
ge::TensorFlowModelParser tensorflow_parser;
// Const node "OP" consuming "OP/Input_1".
NodeDef *op_node_def = new NodeDef();
op_node_def->set_name("OP");
op_node_def->add_input("OP/Input_1");
op_node_def->set_op(TENSORFLOWF_NODE_OP_CONST);

// Identity chain feeding the Const node.
NodeDef *input_node = new NodeDef();
input_node->set_op(TENSORFLOWF_NODE_OP_IDENTITY);
input_node->add_input("OP/Input_1/Input_2");

NodeDef *input_2 = new NodeDef();
input_2->set_op(TENSORFLOWF_NODE_OP_IDENTITY);

tensorflow_parser.nodedef_map_["OP"] = op_node_def;
tensorflow_parser.nodedef_map_["OP/Input_1"] = input_node;
tensorflow_parser.nodedef_map_["OP/Input_1/Input_2"] = input_2;

// NOTE(review): the result is held in Status and compared with `true`;
// presumably ConstOpNeedUpdate returns bool — confirm and use bool here.
std::string op_name = "OP/Input_1/Input_2";
Status ret = tensorflow_parser.ConstOpNeedUpdate(op_name);
EXPECT_EQ(ret, true);

op_name = "OP";
ret = tensorflow_parser.ConstOpNeedUpdate(op_name);
EXPECT_EQ(ret, true);

// Manually delete the NodeDefs created above; nodedef_map_ holds raw,
// non-owning pointers.
delete op_node_def;
delete input_node;
delete input_2;
}

// Drives UppdateInputMap/UppdateOutputMap through a scope-fusion scenario,
// then runs the TensorFlow weights parser as a tail check.
TEST_F(UtestTensorflowParser, parser_UppdateInputMap_test)
{
ge::TensorFlowModelParser tensorflow_parser;
ScopeFusionOpInfo info;
ge::OpNodeContext normal_op_node_context;
ge::OpNodeContext fusion_op_node_context;

// Pre-populate the normal node context with input/output index pairs that
// reference the nodes built below.
string fusion_op_name = "dropout";
normal_op_node_context.input_map["dropout"].push_back({0, 0});
normal_op_node_context.input_map["conv_conv5/BatchNorm/moving_variance"].push_back({0, 1});
normal_op_node_context.output_map["dropout"].push_back({1, 0});
normal_op_node_context.output_map["conv_conv5/BatchNorm/batchnorm/add/y"].push_back({-1, -1});

// Build the GraphDef and a scope graph over it.
tensorflow::GraphDef *graph = new tensorflow::GraphDef();
ScopePassManager passmanager;
shared_ptr<ScopeGraph> scope_graph = passmanager.BuildScopeGraph(graph);
NodeDef *node1 = graph->add_node();
node1->set_name("dropout");
node1->set_op(TENSORFLOWF_NODE_OP_IDENTITY);
node1->add_input("conv_conv5/BatchNorm/moving_variance");
node1->add_input("conv_conv5/BatchNorm/batchnorm/add/y");

NodeDef *node2 = graph->add_node();
node2->set_name("conv_conv5/BatchNorm/moving_variance");
node2->set_op(TENSORFLOWF_NODE_OP_IDENTITY);

NodeDef *node3 = graph->add_node();
node3->set_name("conv_conv5/BatchNorm/batchnorm/add/y");
node3->set_op(TENSORFLOWF_NODE_OP_IDENTITY);

// Describe the fused batchnorm scope the update functions operate on.
info.fusion_node_name = "conv_conv5/BatchNorm/batchnorm";
info.fusion_op_type = parser::FUSIONBATCHNORM;
info.node_name = "conv_conv5/BatchNorm/batchnorm/add";
info.description = "";
info.scope_pass = false;

tensorflow_parser.nodedef_map_["dropout"] = node1;
tensorflow_parser.nodedef_map_["conv_conv5/BatchNorm/moving_variance"] = node2;
tensorflow_parser.nodedef_map_["conv_conv5/BatchNorm/batchnorm/add/y"] = node3;

Status ret = tensorflow_parser.UppdateInputMap(scope_graph, info, fusion_op_node_context, normal_op_node_context);
EXPECT_EQ(ret, domi::SUCCESS);

// NOTE(review): the return value of UppdateOutputMap is not asserted —
// confirm whether that is intentional.
ret = tensorflow_parser.UppdateOutputMap(scope_graph, info, fusion_op_node_context, normal_op_node_context);

// NOTE(review): the path contains "/ /tf_add.pb" (a directory named by a
// single space) yet the parse is expected to SUCCEED — verify the path is
// correct and whether TensorFlowWeightsParser::Parse ignores it.
TensorFlowWeightsParser weights_parser;
std::string caseDir = __FILE__;
std::size_t idx = caseDir.find_last_of("/");
caseDir = caseDir.substr(0, idx);
std::string proto_file = caseDir + "/ /tf_add.pb";
const char *file = proto_file.c_str();
ge::Graph graphs;
Status weightsRet = weights_parser.Parse(file, graphs);
EXPECT_EQ(weightsRet, SUCCESS);
// NOTE(review): graph was handed to BuildScopeGraph above — confirm
// scope_graph does not retain it before this manual delete.
delete graph;
}

} // namespace ge

Loading…
Cancel
Save