Browse Source

!430 increase st testcase

Merge pull request !430 from jwx930962/st_parser
pull/431/MERGE
i-robot Gitee 4 years ago
parent
commit
67dfca5f93
7 changed files with 3085 additions and 83 deletions
  1. +17
    -0
      tests/depends/ops_stub/ops_stub.h
  2. +2320
    -0
      tests/st/testcase/origin_models/ResNet-50-deploy.prototxt
  3. BIN
      tests/st/testcase/origin_models/ResNet-50-model.caffemodel
  4. BIN
      tests/st/testcase/origin_models/caffe_add.caffemodel
  5. +0
    -71
      tests/st/testcase/origin_models/test.json
  6. +337
    -0
      tests/st/testcase/test_caffe_parser.cc
  7. +411
    -12
      tests/st/testcase/test_tensorflow_parser.cc

+ 17
- 0
tests/depends/ops_stub/ops_stub.h View File

@@ -299,6 +299,23 @@ REG_OP(Pooling)
.ATTR(data_format, String, "NCHW")
.OP_END_FACTORY_REG(Pooling)

// Stub registration: Flatten accepts any tensor type on both x and y.
REG_OP(Flatten)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .OP_END_FACTORY_REG(Flatten)

// Stub registration: Softmax over DT_FLOAT / DT_FLOAT16 tensors.
REG_OP(Softmax)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(axis, Int, 0)  // dimension along which softmax is computed
    .ATTR(algo, Int, 1)  // 1: subtract max from every point to avoid overflow (only mode supported)
                         // 0: presumably the no-max-subtraction variant -- original comment was
                         //    garbled ("ubtract"), confirm against the real op spec
                         // 2: perform the Log softmax operation to avoid overflow
    .ATTR(alpha, Float, 1)
    .ATTR(beta, Float, 0)
    .OP_END_FACTORY_REG(Softmax)

// for plugin
static Status ParseParamsStub(const google::protobuf::Message* op_src, ge::Operator& op_dest) {
return SUCCESS;


+ 2320
- 0
tests/st/testcase/origin_models/ResNet-50-deploy.prototxt
File diff suppressed because it is too large
View File


BIN
tests/st/testcase/origin_models/ResNet-50-model.caffemodel View File


BIN
tests/st/testcase/origin_models/caffe_add.caffemodel View File


+ 0
- 71
tests/st/testcase/origin_models/test.json View File

@@ -1,71 +0,0 @@
{
"node": [
{
"attr": [
{
"key": "dtype",
"value": {
"type": "DT_HALF"
}
},
{
"key": "shape",
"value": {
"shape": {
"dim": [
{
"size": 1
}
]
}
}
}
],
"name": "Placeholder",
"op": "Placeholder"
},
{
"attr": [
{
"key": "dtype",
"value": {
"type": "DT_HALF"
}
},
{
"key": "shape",
"value": {
"shape": {
"dim": [
{
"size": 1
}
]
}
}
}
],
"name": "Placeholder_1",
"op": "Placeholder"
},
{
"attr": [
{
"key": "T",
"value": {
"type": "DT_HALF"
}
}
],
"input": [
"Placeholder",
"Placeholder_1"
],
"name": "add_test_1",
"op": "Add"
}
],
"versions": {
"producer": 134
}
}

+ 337
- 0
tests/st/testcase/test_caffe_parser.cc View File

@@ -15,6 +15,9 @@
*/

#include <gtest/gtest.h>

#define protected public
#define private public
#include "parser/common/op_parser_factory.h"
#include "graph/operator_reg.h"
#include "register/op_registry.h"
@@ -27,6 +30,17 @@
#include "tests/depends/ops_stub/ops_stub.h"
#include "proto/caffe/caffe.pb.h"
#include "parser/caffe/caffe_parser.h"
#include "parser/caffe/caffe_data_parser.h"
#include "parser/caffe/caffe_op_parser.h"
#include "parser/caffe/caffe_custom_parser_adapter.h"
#include "parser/caffe/caffe_op_parser.h"
#include "graph/operator_reg.h"
#include "parser/common/acl_graph_parser_util.h"
#undef protected
#undef private

using namespace domi::caffe;
using namespace ge;

namespace ge {
class STestCaffeParser : public testing::Test {
@@ -45,6 +59,15 @@ class STestCaffeParser : public testing::Test {
// Plugin parse-params stub used by the custom-op registrations below:
// accepts any source message and reports success without touching op_dest.
static Status ParseParams(const google::protobuf::Message* op_src, ge::Operator& op_dest) {
  return SUCCESS;
}

// Wraps |opDesc| into a Node by adding it to a helper ComputeGraph.
// Returns nullptr when |opDesc| is null.
// NOTE(review): the graph is a function-local static, so nodes created by
// every test in this binary accumulate in the same graph instance.
static ge::NodePtr GenNodeFromOpDesc(ge::OpDescPtr opDesc){
  if (!opDesc) {
    return nullptr;
  }
  static auto g = std::make_shared<ge::ComputeGraph>("g");
  return g->AddNode(std::move(opDesc));
}

void STestCaffeParser::RegisterCustomOp() {
REGISTER_CUSTOM_OP("Data")
.FrameworkType(domi::CAFFE)
@@ -147,4 +170,318 @@ TEST_F(STestCaffeParser, caffe_parser_to_json) {
EXPECT_EQ(ret, FAILED);
}

// ParseParamsForDummyData / ParseParamsForInput must reject a layer without
// the matching parameter message, and ParseParamsForDummyData succeeds once
// a dummy-data shape entry exists.
TEST_F(STestCaffeParser, caffe_parser_ParseParamsForDummyData_test)
{
  CaffeDataParser caffe_parser;
  domi::caffe::NetParameter net;
  ge::OpDescPtr op = std::make_shared<ge::OpDesc>("conv", "Convolution");
  domi::caffe::LayerParameter *lay = net.add_layer();

  // No dummy_data_param yet -> both parse paths fail.
  Status ret = caffe_parser.ParseParamsForDummyData(lay, op);
  EXPECT_EQ(ret, FAILED);

  ret = caffe_parser.ParseParamsForInput(lay, op);
  EXPECT_EQ(ret, FAILED);

  // dummy_data_param present but still without any shape -> fails.
  domi::caffe::DummyDataParameter *dummyData = lay->mutable_dummy_data_param();
  ret = caffe_parser.ParseParamsForDummyData(lay, op);
  EXPECT_EQ(ret, FAILED);

  // Adding a shape entry (the returned BlobShape* was previously stored in an
  // unused local, triggering -Wunused-variable) makes parsing succeed.
  dummyData->add_shape();
  ret = caffe_parser.ParseParamsForDummyData(lay, op);
  EXPECT_EQ(ret, SUCCESS);
}

// ConvertWeight should accept a blob carrying int8 raw data, two float data
// entries and a 1x2 shape. The LayerParameter is stack-allocated: the old
// code new'd it and relied on a trailing manual delete (leak-prone, and
// against the no-raw-new guideline).
TEST_F(STestCaffeParser, convertWeights_success)
{
  CaffeOpParser parser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  domi::caffe::LayerParameter layer;  // owns its blobs; destroyed automatically
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->set_int8_data("12");
  blob->add_data(1);
  blob->add_data(1);

  domi::caffe::BlobShape *shap = blob->mutable_shape();
  shap->add_dim(1);
  shap->add_dim(2);

  Status ret = parser.ConvertWeight(*blob, "", weight);
  EXPECT_EQ(domi::SUCCESS, ret);
}

// ParseWeights succeeds for a layer with no blobs and again after a blob with
// data plus a 1x2 shape is added. Stack allocation replaces the original raw
// new/manual delete (leak-prone on early exit or exception).
TEST_F(STestCaffeParser, CaffeCustomParserAdapter_ParseWeights_success)
{
  CaffeCustomParserAdapter parserAdapter;
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  LayerParameter layer;  // automatic storage: no manual delete needed
  Status ret = parserAdapter.ParseWeights(&layer, node_tmp);
  EXPECT_EQ(ret, SUCCESS);

  BlobProto* blob = layer.add_blobs();
  blob->add_data(1);
  blob->add_data(1);
  BlobShape* shap = blob->mutable_shape();
  shap->add_dim(1);
  shap->add_dim(2);

  ret = parserAdapter.ParseWeights(&layer, node_tmp);
  EXPECT_EQ(ret, SUCCESS);
}

// ParseParams must report PARAM_INVALID for an operator created from a bare
// OpDesc that carries no caffe layer payload.
TEST_F(STestCaffeParser, CaffeCustomParserAdapter_ParseParams_success)
{
  ge::OpDescPtr src_desc = std::make_shared<ge::OpDesc>("Data", "Input");
  ge::Operator op_src = ge::OpDescUtils::CreateOperatorFromOpDesc(src_desc);
  ge::OpDescPtr op_dest = std::make_shared<ge::OpDesc>("Data", "Input");

  CaffeCustomParserAdapter adapter;
  Status result = adapter.ParseParams(op_src, op_dest);
  EXPECT_EQ(result, PARAM_INVALID);
}

// CaffeDataParser::ParseParams is expected to fail both for a DUMMY_DATA-typed
// layer (no dummy data payload attached) and for an arbitrary non-data type.
TEST_F(STestCaffeParser, CaffeDataParser_ParseParams_success)
{
  domi::caffe::NetParameter net;
  ge::OpDescPtr op_desc_src = std::make_shared<ge::OpDesc>("Data", "Input");
  domi::caffe::LayerParameter* lay0 = net.add_layer();
  lay0->set_name("conv");
  lay0->set_type(ge::parser::DUMMY_DATA);

  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  CaffeDataParser parserAdapter;
  Status ret = parserAdapter.ParseParams(lay0, opDef);
  EXPECT_EQ(ret, FAILED);

  // Re-typing the layer to an unrelated constant string is rejected as well.
  lay0->set_type(ge::parser::ATTR_NAME_INPUT_TENSOR_DESC);
  ret = parserAdapter.ParseParams(lay0, opDef);
  EXPECT_EQ(ret, FAILED);
}

// Weights Parse argument validation: null file and null graph each yield
// PARAM_INVALID; with both present the parse of the ResNet-50 caffemodel is
// expected to end in FAILED in this stubbed environment.
TEST_F(STestCaffeParser, CaffeWeightsParser_Parse_test)
{
  CaffeWeightsParser weightParser;
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string model_file = case_dir + "/origin_models/ResNet-50-model.caffemodel";
  const char *file = nullptr;
  ge::ComputeGraphPtr graph;
  Status ret = weightParser.Parse(file, graph);
  EXPECT_EQ(ret, PARAM_INVALID);

  // Valid path but still a null graph pointer.
  file = model_file.c_str();
  ret = weightParser.Parse(file, graph);
  EXPECT_EQ(ret, PARAM_INVALID);

  // Both arguments valid.
  graph = std::make_shared<ComputeGraph>("test");
  ret = weightParser.Parse(file, graph);
  EXPECT_EQ(ret, FAILED);
}

// ParseWeightByFusionProto must fail when the weight path points at a proto
// definition file instead of a caffemodel.
TEST_F(STestCaffeParser, CaffeWeightsParser_ParseWeightByFusionProto_test)
{
  CaffeWeightsParser weightParser;
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  // NOTE(review): the original also built an unused ResNet-50 caffemodel path
  // (weight_file) that was never passed anywhere; removed. The weight path
  // deliberately reuses the proto file to provoke the failure.
  std::string model_file = case_dir + "/origin_models/caffe.proto";
  const char *weight_path = model_file.c_str();
  std::string fusion_proto_path = model_file;
  std::string fusion_proto_name = "caffe";
  ge::ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
  Status ret = weightParser.ParseWeightByFusionProto(weight_path, fusion_proto_path, fusion_proto_name, graph);
  EXPECT_EQ(ret, FAILED);
}

// ParseFromMemory argument validation: null buffer and null graph each give
// PARAM_INVALID; a 1-byte buffer with a valid graph fails weight parsing, and
// the model parser rejects the same buffer.
TEST_F(STestCaffeParser, CaffeWeightsParser_ParseFromMemory_test)
{
  CaffeWeightsParser weightParser;
  std::string case_dir = __FILE__;
  case_dir = case_dir.substr(0, case_dir.find_last_of("/"));
  std::string weight_file = case_dir + "/origin_models/ResNet-50-model.caffemodel";
  ge::ComputeGraphPtr graph;
  const char *data = nullptr;
  Status ret = weightParser.ParseFromMemory(data, 1, graph);
  EXPECT_EQ(ret, PARAM_INVALID);

  // Non-null buffer (just the path string bytes) but still a null graph.
  data = weight_file.c_str();
  ret = weightParser.ParseFromMemory(data, 1, graph);
  EXPECT_EQ(ret, PARAM_INVALID);

  // 1 byte of non-model data cannot be parsed as weights.
  graph = std::make_shared<ComputeGraph>("test");
  ret = weightParser.ParseFromMemory(data, 1, graph);
  EXPECT_EQ(ret, domi::PARSE_WEIGHTS_FAILED);

  CaffeModelParser model_parser;
  ret = model_parser.ParseFromMemory(data, 1, graph);
  EXPECT_EQ(ret, FAILED);
}

// CreateCustomOperator rejects empty op name/type and succeeds once both are
// set; finishes with a smoke call to AddOutputInfoToContext.
TEST_F(STestCaffeParser, CaffeWeightsParser_CreateCustomOperator_test)
{
  CaffeModelParser model_parser;

  vector<ge::Operator> operators;
  ge::OpDescPtr op_desc_src = std::make_shared<ge::OpDesc>("Data", "Input");
  ge::Operator op_src = ge::OpDescUtils::CreateOperatorFromOpDesc(op_desc_src);
  operators.emplace_back(op_src);
  // Empty name and type -> rejected.
  std::string op_name = "";
  std::string op_type = "";
  domi::caffe::NetParameter net;
  domi::caffe::LayerParameter *lay0 = net.add_layer();
  lay0->set_name("Data");
  lay0->set_type("Input");
  Status ret = model_parser.CreateCustomOperator(op_name, op_type, &net, 1, operators);
  EXPECT_EQ(ret, FAILED);

  // Concrete name/type matching the layer -> created successfully.
  op_name = "Data";
  op_type = "Input";

  ret = model_parser.CreateCustomOperator(op_name, op_type, &net, 1, operators);
  EXPECT_EQ(ret, SUCCESS);

  // Smoke call; return value intentionally unchecked.
  model_parser.AddOutputInfoToContext(op_name, 1);
}

// ParseOutputNodeTopInfo succeeds when no output nodes are configured, and
// returns PARAM_INVALID once out_nodes name tensors absent from the net.
TEST_F(STestCaffeParser, CaffeWeightsParser_ParseOutputNodeTopInfo_test)
{
  CaffeModelParser model_parser;
  AclGrphParseUtil acl_graph_parse_util;

  domi::caffe::NetParameter net;
  domi::caffe::LayerParameter *lay0 = net.add_layer();
  lay0->set_name("Data");
  lay0->set_type("Input");
  Status ret = model_parser.ParseOutputNodeTopInfo(net);
  EXPECT_EQ(ret, SUCCESS);

  // Configure OUT_NODES with tensor names that do not exist in |net|.
  GetParserContext().type = domi::CAFFE;
  string graph_name;
  std::map<AscendString, AscendString> out_nodes_with_tensor_name1 = {
      {AscendString(ge::ir_option::OUT_NODES), AscendString("Out_tensor_1;Out_tensor_2")}};
  acl_graph_parse_util.ParseParamsBeforeGraph(out_nodes_with_tensor_name1, graph_name);
  ret = model_parser.ParseOutputNodeTopInfo(net);
  EXPECT_EQ(ret, PARAM_INVALID);
}

// ParseWeightType fails when the blob's int8 raw data cannot satisfy the
// requested 1x1x3x4 shape / size. The LayerParameter is stack-allocated: the
// original new'd it and never deleted it (memory leak).
TEST_F(STestCaffeParser, CaffeOpParser_ParseWeightType_test)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  domi::caffe::LayerParameter layer;  // automatic storage: no leak
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->set_int8_data("10");
  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 1, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// int32 blob data: size 1 parses successfully, size 2 (more elements than the
// blob holds) fails. Stack allocation fixes the original leak of the new'd
// LayerParameter that was never deleted.
TEST_F(STestCaffeParser, CaffeOpParser_ParseWeightType_test2)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  domi::caffe::LayerParameter layer;  // automatic storage: no leak
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->add_int32_data(10);

  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 1, lay_name, weight);
  EXPECT_EQ(ret, SUCCESS);

  ret = opParser.ParseWeightType(*blob, shape, 2, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// double blob data: size 1 parses successfully, size 3 fails. Stack allocation
// fixes the original leak of the new'd LayerParameter that was never deleted.
TEST_F(STestCaffeParser, CaffeOpParser_ParseWeightType_test3)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  domi::caffe::LayerParameter layer;  // automatic storage: no leak
  domi::caffe::BlobProto *blob = layer.add_blobs();
  double value = 2.0;
  blob->add_double_data(value);

  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 1, lay_name, weight);
  EXPECT_EQ(ret, SUCCESS);

  ret = opParser.ParseWeightType(*blob, shape, 3, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// uint64 blob data: size 1 parses successfully, size 2 fails. Stack allocation
// fixes the original leak of the new'd LayerParameter that was never deleted.
TEST_F(STestCaffeParser, CaffeOpParser_ParseWeightType_test4)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  domi::caffe::LayerParameter layer;  // automatic storage: no leak
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->add_uint64_data(10);

  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 1, lay_name, weight);
  EXPECT_EQ(ret, SUCCESS);

  ret = opParser.ParseWeightType(*blob, shape, 2, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// float blob data with a size (10) far larger than the blob contents: parsing
// must fail. Stack allocation fixes the original leak of the new'd
// LayerParameter that was never deleted.
TEST_F(STestCaffeParser, CaffeOpParser_ParseWeightType_test5)
{
  CaffeOpParser opParser;
  ge::GeTensorDesc ge_tensor_desc = ge::GeTensorDesc();
  ge::GeTensorPtr weight = std::make_shared<ge::GeTensor>(ge_tensor_desc);
  ge::OpDescPtr opDef = std::make_shared<ge::OpDesc>("","");
  auto node_tmp = GenNodeFromOpDesc(opDef);

  domi::caffe::LayerParameter layer;  // automatic storage: no leak
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->add_data(10);

  std::string lay_name = "DATA";
  GeShape shape({1,1,3,4});
  Status ret = opParser.ParseWeightType(*blob, shape, 10, lay_name, weight);
  EXPECT_EQ(ret, FAILED);
}

// Smoke test: ConvertShape on a legacy NCHW blob (num/channels/height/width
// fields) must not crash. Stack allocation fixes the original leak of the
// new'd LayerParameter that was never deleted.
TEST_F(STestCaffeParser, CaffeOpParser_ConvertShape_test)
{
  CaffeOpParser opParser;
  domi::caffe::LayerParameter layer;  // automatic storage: no leak
  domi::caffe::BlobProto *blob = layer.add_blobs();
  blob->set_num(1);
  blob->set_channels(2);
  blob->set_height(1);
  blob->set_width(1);
  std::vector<int64_t> shape;

  opParser.ConvertShape(*blob, shape);
}

} // namespace ge

+ 411
- 12
tests/st/testcase/test_tensorflow_parser.cc View File

@@ -67,6 +67,11 @@
#include "parser/common/parser_fp16_t.h"
#include "parser/common/op_parser_factory.h"
#include "parser/common/prototype_pass_manager.h"
#include "parser/common/register_tbe.h"
#include "parser/common/pass_manager.h"
#include "parser/tensorflow/graph_optimizer.h"
#include "metadef/inc/register/scope/scope_pass_registry_impl.h"
#include "register/scope/scope_fusion_pass_register.h"
#undef protected
#undef private

@@ -106,8 +111,16 @@ public:
}
};

// Graph pass stub whose Run always fails; used by the graph_pass_error test
// to drive PassManager's failure path.
// NOTE(review): Run carries no `override` -- presumably GraphPass::Run is
// virtual with this exact signature; confirm, and add `override` if so.
class ErrorGraphPass: public GraphPass
{
  Status Run(ComputeGraphPtr graph)
  {
    return domi::FAILED;
  }
};

class ScopeTestPass : public ScopeBasePass {
protected:
protected:
vector<ScopeFusionPatterns> DefinePatterns() {
vector<ScopeFusionPatterns> patterns_list;
return patterns_list;
@@ -730,12 +743,98 @@ namespace {

// Appends a NodeDef named |name| with op type |optype| and one input |input|
// to |graph|, returning the new node.
NodeDef* AddGraphNode(GraphDef *graph, string name, string optype, string input)
{
  // The diff rendering carried both the old and the new spelling of this
  // declaration as two separate lines (a redefinition); only one is kept.
  NodeDef *node_def = graph->add_node();
  node_def->set_name(name);
  node_def->set_op(optype);
  node_def->add_input(input);
  return node_def;
}

// Builds a small test graph:
//   Data1 -> Relu1 -> FullConnection -> Relu2 -> Mul -> {Mul1, Mul2}
// When |with_leaf_node| is true, FullConnection's second output additionally
// feeds a leaf node Relu3.
ge::ComputeGraphPtr build_graph(bool with_leaf_node = false)
{
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>("default");
  ge::OpDescPtr data_op = std::make_shared<ge::OpDesc>();
  data_op->SetType(parser::DATA);
  data_op->SetName("Data1");
  data_op->AddInputDesc(ge::GeTensorDesc());
  data_op->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr data1 = graph->AddNode(data_op);

  ge::OpDescPtr relu_op1 = std::make_shared<ge::OpDesc>();
  relu_op1->SetType(parser::ACTIVATION);
  relu_op1->SetName("Relu1");
  relu_op1->AddInputDesc(ge::GeTensorDesc());
  relu_op1->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr relu1 = graph->AddNode(relu_op1);

  // Relu2 has two outputs; both are wired into Mul below.
  ge::OpDescPtr relu_op2 = std::make_shared<ge::OpDesc>();
  relu_op2->SetType(parser::RELU);
  relu_op2->SetName("Relu2");
  relu_op2->AddInputDesc(ge::GeTensorDesc());
  relu_op2->AddOutputDesc(ge::GeTensorDesc());
  relu_op2->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr relu2 = graph->AddNode(relu_op2);

  // Relu3 is only added to the graph when a dangling leaf is requested.
  ge::OpDescPtr relu_op3 = std::make_shared<ge::OpDesc>();
  relu_op3->SetType(parser::ACTIVATION);
  relu_op3->SetName("Relu3");
  relu_op3->AddInputDesc(ge::GeTensorDesc());
  relu_op3->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr relu3;
  if (with_leaf_node == true) {
    relu3 = graph->AddNode(relu_op3);
  }

  // Mul: 2 inputs, 4 outputs -- two edges each into Mul1 and Mul2.
  ge::OpDescPtr mul_op = std::make_shared<ge::OpDesc>();
  mul_op->SetType(parser::MUL);
  mul_op->SetName("Mul");
  mul_op->AddInputDesc(ge::GeTensorDesc());
  mul_op->AddInputDesc(ge::GeTensorDesc());
  mul_op->AddOutputDesc(ge::GeTensorDesc());
  mul_op->AddOutputDesc(ge::GeTensorDesc());
  mul_op->AddOutputDesc(ge::GeTensorDesc());
  mul_op->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr mul = graph->AddNode(mul_op);

  ge::OpDescPtr mul_op1 = std::make_shared<ge::OpDesc>();
  mul_op1->SetType(parser::MUL);
  mul_op1->SetName("Mul1");
  mul_op1->AddInputDesc(ge::GeTensorDesc());
  mul_op1->AddInputDesc(ge::GeTensorDesc());
  mul_op1->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr mul1 = graph->AddNode(mul_op1);

  ge::OpDescPtr mul_op2 = std::make_shared<ge::OpDesc>();
  mul_op2->SetType(parser::MUL);
  mul_op2->SetName("Mul2");
  mul_op2->AddInputDesc(ge::GeTensorDesc());
  mul_op2->AddInputDesc(ge::GeTensorDesc());
  mul_op2->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr mul2 = graph->AddNode(mul_op2);

  // FullConnection: one input, two outputs (second one feeds the leaf).
  ge::OpDescPtr fc_op = std::make_shared<ge::OpDesc>();
  fc_op->SetType(parser::FULL_CONNECTION);
  fc_op->SetName("FullConnection");
  fc_op->AddInputDesc(ge::GeTensorDesc());
  fc_op->AddOutputDesc(ge::GeTensorDesc());
  fc_op->AddOutputDesc(ge::GeTensorDesc());
  ge::NodePtr fc = graph->AddNode(fc_op);

  // Wire the data edges described in the header comment.
  ge::GraphUtils::AddEdge(data1->GetOutDataAnchor(0), relu1->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(relu1->GetOutDataAnchor(0), fc->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(fc->GetOutDataAnchor(0), relu2->GetInDataAnchor(0));
  if (with_leaf_node == true) {
    ge::GraphUtils::AddEdge(fc->GetOutDataAnchor(1), relu3->GetInDataAnchor(0));
  }
  ge::GraphUtils::AddEdge(relu2->GetOutDataAnchor(0), mul->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(relu2->GetOutDataAnchor(1), mul->GetInDataAnchor(1));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(0), mul1->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(1), mul1->GetInDataAnchor(1));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(2), mul2->GetInDataAnchor(0));
  ge::GraphUtils::AddEdge(mul->GetOutDataAnchor(3), mul2->GetInDataAnchor(1));

  return graph;
}
}

namespace {
@@ -1994,8 +2093,7 @@ TEST_F(STestTensorflowParser, tensorflow_Scope_pass_test)
}

ge::TensorFlowModelParser tf_model_parser;
std::vector<string> scope_passes_list = {"pass_1", "pass_2"};
tf_model_parser.RunScopeFusionPass(scope_passes_list, passmanager, scope_graph);
std::vector<string> scope_passes_list = {"ScopeBasicLSTMCellPass", "ScopeLayerNormPass"};
Status ret = tf_model_parser.RunScopeFusionPass(scope_passes_list, passmanager, scope_graph);
EXPECT_NE(ge::SUCCESS, ret);
}
@@ -2935,6 +3033,8 @@ TEST_F(STestTensorflowParser, tensorflow_GetNodeFormat_test)
TensorFlowModelParser model_parser;
Status ret = model_parser.GetNodeFormat(node_def1, pred_transpose, format, visited_node);
EXPECT_EQ(ret, FAILED);


delete node_def1;
delete node_def2;
}
@@ -2947,20 +3047,27 @@ TEST_F(STestTensorflowParser, tensorflow_GetFormatTranspose_test)
TensorFlowModelParser modelParser;
Status ret = modelParser.GetFormatTranspose(transpose_node, transpose_direc);
EXPECT_EQ(ret, FAILED);
delete transpose_node;
}

ge::TensorFlowModelParser parser;
TEST_F(STestTensorflowParser, tensorflow_GetFormatTranspose_test2)
{
TensorFlowModelParser modelParser;
TfTranspose transpose_direc = NO_TRANSPOSE;
NodeDef *transpose_node = initNodeDef();
GraphDef graph;

auto arg0 = AddNode(graph, "_Arg", "arg0");
auto snapshot0 = AddNode(graph, "Snapshot", "snapshot0");
auto ret0 = AddNode(graph, "_Retval", "retval0");

auto arg1 = AddNode(graph, "_Arg", "arg1");
auto snapshot1 = AddNode(graph, "Snapshot", "snapshot1");
auto ret1 = AddNode(graph, "_Retval", "retval1");
auto ret1 = AddNode(graph, TENSORFLOWF_NODE_OP_TRANSPOSE, "retval1");

auto arg2 = AddNode(graph, "_Arg", "arg2");
auto snapshot2 = AddNode(graph, "Snapshot", "snapshot2");
auto ret2 = AddNode(graph, "_Retval", "retval2");
auto ret2 = AddNode(graph, TENSORFLOWF_NODE_OP_TRANSPOSE, TENSORFLOWF_NODE_OP_TRANSPOSE);

AddInput(arg0, snapshot0, 0);
AddInput(snapshot0, ret0, 0);
@@ -2971,9 +3078,14 @@ TEST_F(STestTensorflowParser, tensorflow_GetFormatTranspose_test)
AddInput(snapshot0, snapshot1, -1);
AddInput(snapshot1, snapshot2, -1);

ASSERT_EQ(parser.GraphDefOptimize(&graph), domi::SUCCESS);
ASSERT_EQ(ret1->input_size(), 2);
ret = modelParser.GetFormatTranspose(ret1, transpose_direc);
bool train_flag = ge::GetParserContext().train_flag;
ge::GetParserContext().train_flag = true;
ASSERT_EQ(modelParser.GraphDefOptimize(&graph), SUCCESS);
ge::GetParserContext().train_flag = train_flag;

modelParser.nodedef_map_["arg1"] = transpose_node;
modelParser.nodedef_map_["^arg0"] = transpose_node;
Status ret = modelParser.GetFormatTranspose(ret1, transpose_direc);
EXPECT_EQ(ret, SUCCESS);
delete transpose_node;
}
@@ -3650,7 +3762,6 @@ TEST_F(STestTensorflowParser, tensorflow_ModelSaver_test)
ret = ge::parser::ModelSaver::SaveJsonToFile(file_path, model);
EXPECT_EQ(ret, FAILED);

std::cout << __FILE__ << std::endl;
std::string caseDir = __FILE__;
std::size_t idx = caseDir.find_last_of("/");
caseDir = caseDir.substr(0, idx);
@@ -3755,7 +3866,7 @@ TEST_F(STestTensorflowParser, tensorflow_FP16_parser_test)
fp16.operator=(ui16_val);
ui16_val = 0;
fp16.operator=(ui16_val);
ui16_val = 100000;
ui16_val = 1;
fp16.operator=(ui16_val);

int32_t i32_val = 0;
@@ -3769,4 +3880,292 @@ TEST_F(STestTensorflowParser, tensorflow_FP16_parser_test)
fp16.operator=(ui32_val);
}

// AclParserInitialize rejects an empty option map and succeeds once a
// framework type ("2") is supplied.
TEST_F(STestTensorflowParser, tensorflow_AclParserInitialize_test)
{
  AclGrphParseUtil util;
  std::map<std::string, std::string> init_options;

  // No framework type configured -> initialization fails.
  Status status = util.AclParserInitialize(init_options);
  EXPECT_EQ(status, FAILED);

  // Framework type "2" -> initialization succeeds.
  init_options = {{ge::FRAMEWORK_TYPE, "2"}};
  status = util.AclParserInitialize(init_options);
  EXPECT_EQ(status, SUCCESS);
}

// GetOutputLeaf on a freshly added NETOUTPUT node with a pre-filled output
// list is expected to fail.
TEST_F(STestTensorflowParser, tensorflow_GetOutputLeaf_test)
{
  AclGrphParseUtil parseUtil;
  ge::ComputeGraphPtr compute_graph = build_graph(true);
  ge::NodePtr output_nodes_info = compute_graph->FindNode("Relu3");
  std::vector<std::pair<ge::NodePtr, int32_t>> output_nodes = {{output_nodes_info,0}};
  // (An unused extra ComputeGraph local from the original was removed.)
  ge::NodePtr node = AddNode(compute_graph, "K", parser::NETOUTPUT,1,1);
  Status ret = parseUtil.GetOutputLeaf(node, output_nodes);
  EXPECT_EQ(ret, FAILED);
}

// PassManager::Run must surface the failure of a pass that always fails.
TEST_F(STestTensorflowParser, graph_pass_error)
{
  ComputeGraphPtr target_graph = std::make_shared<ComputeGraph>("test");
  ErrorGraphPass failing_pass;
  ge::parser::PassManager manager;

  std::vector<std::pair<string, GraphPass*>> pass_list;
  pass_list.emplace_back("", &failing_pass);

  Status run_status = manager.Run(target_graph, pass_list);
  EXPECT_EQ(domi::FAILED, run_status);
}

// FindFmkNodeCluser succeeds for a cluster map that mixes a valid framework
// node with a null entry (FindNode("Relu3") yields nullptr since only "K"
// was added to the graph).
TEST_F(STestTensorflowParser, parser_FindFmkNodeCluser_success)
{
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("FrameworkOp");
  ParserGraphOptimizer graphOptimizer(graph, domi::TENSORFLOW);
  ge::NodePtr node = AddNode(graph, "K", parser::FRAMEWORK_OP_TYPE, 1, 1);
  ge::NodePtr output_nodes_info = graph->FindNode("Relu3");
  std::unordered_map<string, vector<NodePtr>> node_cluser_Map({
      {"x", {node, output_nodes_info}},
  });
  Status ret = graphOptimizer.FindFmkNodeCluser(node_cluser_Map);
  EXPECT_EQ(ret, SUCCESS);
}

// Builds a DAG, collects the anchors adjacent to DATA / NETOUTPUT nodes, and
// verifies that output and input anchors can be rebuilt onto a fresh op desc.
TEST_F(STestTensorflowParser, parser_RebuildOutputAnchors_test)
{
  ge::ComputeGraphPtr subGraph = std::make_shared<ge::ComputeGraph>("default");
  ParserGraphOptimizer graphOptimizer(subGraph, domi::TENSORFLOW);
  string inputNodeType = "DATA";
  MakeDagGraph(subGraph, inputNodeType);

  vector<ge::InDataAnchorPtr> in_anchor;
  vector<ge::OutDataAnchorPtr> out_anchor;
  // NOTE(review): the in-anchor scan is nested inside the out-anchor loop, so
  // inputs are collected once per output anchor -- possibly unintended
  // duplication; confirm against the sibling RebuildFusionNode test.
  for(ge::NodePtr node : subGraph->GetAllNodes()) {
    for(auto out : node->GetAllOutDataAnchors()) {
      // In-anchors fed by a DATA node become candidate fusion inputs.
      for(auto in : node->GetAllInDataAnchors()) {
        if(in->GetPeerOutAnchor() != nullptr && in->GetPeerOutAnchor()->GetOwnerNode()->GetOpDesc()->GetType() == parser::DATA) {
          in_anchor.push_back(in);
        }
      }
      // Out-anchors feeding a NETOUTPUT node become candidate fusion outputs.
      for(auto i : out->GetPeerInDataAnchors()) {
        if(i->GetOwnerNode()->GetOpDesc()->GetType() == parser::NETOUTPUT) {
          out_anchor.push_back(out);
        }
      }
    }
  }
  OpDescPtr fusion_op_desc = make_shared<ge::OpDesc>("FusionCustom", ge::parser::CONSTANT);
  Status ret = graphOptimizer.RebuildOutputAnchors(out_anchor, fusion_op_desc);
  EXPECT_EQ(domi::SUCCESS, ret);

  ret = graphOptimizer.RebuildInputAnchors(in_anchor, fusion_op_desc);
  EXPECT_EQ(domi::SUCCESS, ret);
}

// Smoke test: LinkInnerAnchor over a two-node map must complete without
// crashing (no return value to check).
TEST_F(STestTensorflowParser, parser_LinkInnerAnchor_test)
{
  ge::ComputeGraphPtr sub_graph = std::make_shared<ge::ComputeGraph>("default");
  NodePtr first_node = AddNode(sub_graph, "A", parser::NETOUTPUT, 1, 1);
  NodePtr second_node = AddNode(sub_graph, "B", parser::NETOUTPUT, 1, 1);

  unordered_map<string, ge::NodePtr> node_map{{"A", first_node},
                                              {"B", second_node}};

  ParserGraphOptimizer optimizer(sub_graph, domi::TENSORFLOW);
  optimizer.LinkInnerAnchor(node_map);
}

// MarkForFusion over a cluster containing a null node entry (only "K" exists,
// so FindNode("Relu3") returns nullptr) is expected to yield INTERNAL_ERROR.
TEST_F(STestTensorflowParser, parser_MarkForFusion_test)
{
  ge::ComputeGraphPtr subGraph = std::make_shared<ge::ComputeGraph>("default");
  ParserGraphOptimizer graphOptimizer(subGraph, domi::TENSORFLOW);
  ge::NodePtr node = AddNode(subGraph, "K", parser::FRAMEWORK_OP_TYPE, 1, 1);
  ge::NodePtr output_nodes_info = subGraph->FindNode("Relu3");
  std::unordered_map<string, vector<NodePtr>> node_cluser_Map({
      {"x", {node, output_nodes_info}},
  });
  Status ret = graphOptimizer.MarkForFusion(node_cluser_Map);
  EXPECT_EQ(ret, INTERNAL_ERROR);
}

// UpdateGraph over two bare NETOUTPUT nodes must be rejected with
// PARAM_INVALID.
TEST_F(STestTensorflowParser, parser_UpdateGraph_test)
{
  ge::ComputeGraphPtr sub_graph = std::make_shared<ge::ComputeGraph>("default");
  ParserGraphOptimizer optimizer(sub_graph, domi::TENSORFLOW);

  NodePtr first_node = AddNode(sub_graph, "A", parser::NETOUTPUT, 1, 1);
  NodePtr second_node = AddNode(sub_graph, "B", parser::NETOUTPUT, 1, 1);
  std::vector<NodePtr> fusion_nodes{first_node, second_node};

  Status status = optimizer.UpdateGraph(fusion_nodes);
  EXPECT_EQ(status, PARAM_INVALID);
}

// RebuildFusionNode on a node whose op desc ("dpop_123"/"FrameworkOp") has no
// anchors registered is expected to fail, even with anchors collected from a
// valid DAG.
TEST_F(STestTensorflowParser, parser_RebuildFusionNode_test)
{
  ge::ComputeGraphPtr graph = std::make_shared<ge::ComputeGraph>(GRAPH_DEFAULT_NAME);
  ParserGraphOptimizer graphOptimizer(graph, domi::TENSORFLOW);
  string inputNodeType = "DATA";
  MakeDagGraph(graph, inputNodeType);
  vector<ge::InDataAnchorPtr> input_anchors;
  vector<ge::OutDataAnchorPtr> output_anchors;
  // NOTE(review): as in parser_RebuildOutputAnchors_test, the in-anchor scan
  // is nested inside the out-anchor loop, collecting inputs once per output
  // anchor -- confirm whether the duplication is intended.
  for(ge::NodePtr node : graph->GetAllNodes()) {
    for(auto out : node->GetAllOutDataAnchors()) {
      for(auto in : node->GetAllInDataAnchors()) {
        if(in->GetPeerOutAnchor() != nullptr && in->GetPeerOutAnchor()->GetOwnerNode()->GetOpDesc()->GetType() == parser::DATA) {
          input_anchors.push_back(in);
        }
      }
      for(auto i : out->GetPeerInDataAnchors()) {
        if(i->GetOwnerNode()->GetOpDesc()->GetType() == parser::NETOUTPUT) {
          output_anchors.push_back(out);
        }
      }
    }
  }
  map<ge::OutDataAnchorPtr, vector<ge::InDataAnchorPtr>> output_in_map;
  vector<ge::InControlAnchorPtr> input_control_anchors;
  vector<ge::OutControlAnchorPtr> output_control_anchors;

  ge::OpDescPtr op = std::make_shared<ge::OpDesc>("dpop_123", "FrameworkOp");
  ge::NodePtr fusion_node = std::make_shared<ge::Node>(op, graph);
  Status ret = graphOptimizer.RebuildFusionNode(input_anchors, output_anchors, output_in_map, input_control_anchors, output_control_anchors, fusion_node);
  EXPECT_EQ(ret, FAILED);
}

// InsertNode with four disconnected nodes and entirely empty anchor
// collections is expected to be rejected with PARAM_INVALID.
TEST_F(STestTensorflowParser, parser_InsertNode_test)
{
  std::vector<NodePtr> nodes;
  ge::ComputeGraphPtr subGraph = std::make_shared<ge::ComputeGraph>("default");
  ParserGraphOptimizer graphOptimizer(subGraph, domi::TENSORFLOW);
  auto merge_node = AddNode(subGraph, "Merge", parser::MERGE, 1, 2);
  auto node1 = AddNode(subGraph, "Op1", parser::RELU, 1, 1);
  auto node2 = AddNode(subGraph, "Op2", parser::CONVOLUTION, 1, 1);
  auto node3 = AddNode(subGraph, "Op3", parser::CONVOLUTION, 1, 1);
  nodes.emplace_back(merge_node);
  nodes.emplace_back(node1);
  nodes.emplace_back(node2);
  nodes.emplace_back(node3);
  // All anchor collections stay empty on purpose.
  vector<ge::InDataAnchorPtr> in_anchor;
  vector<ge::OutDataAnchorPtr> out_anchor;
  map<ge::OutDataAnchorPtr, vector<ge::InDataAnchorPtr>> output_in_map;
  vector<ge::InControlAnchorPtr> input_control_anchors;
  vector<ge::OutControlAnchorPtr> output_control_anchors;
  unordered_map<string, ge::NodePtr> node_map;
  node_map.insert(pair<string, ge::NodePtr>("A", merge_node));
  node_map.insert(pair<string, ge::NodePtr>("B", node1));
  node_map.insert(pair<string, ge::NodePtr>("C", node2));
  node_map.insert(pair<string, ge::NodePtr>("D", node3));

  Status ret = graphOptimizer.InsertNode(subGraph, nodes, in_anchor, out_anchor, output_in_map, input_control_anchors, output_control_anchors, node_map);
  EXPECT_EQ(ret, PARAM_INVALID);
}

// GeStoi must report INTERNAL_ERROR when the extracted suffix is not a
// parseable integer.
TEST_F(STestTensorflowParser, parser_GeStoi_test)
{
  TensorFlowModelParser model_parser;
  string node_name = "dynamic_rnn_node1";
  string non_numeric_index = "dynamic_rnn";
  int32_t parsed_index = 0;

  Status status = model_parser.GeStoi(node_name, non_numeric_index, &parsed_index);
  EXPECT_EQ(status, INTERNAL_ERROR);
}

// ConstOpNeedUpdate returns true both for the leaf Identity node and for the
// Const node "OP" whose input chain goes through an Identity. The NodeDefs
// are stack-allocated; the original new'd and manually deleted them, which
// leaks if anything between new and delete throws (nodedef_map_ holds
// non-owning pointers either way, so addresses of locals are fine here).
TEST_F(STestTensorflowParser, parser_ConstOpNeedUpdate_test)
{
  ge::TensorFlowModelParser tensorflow_parser;

  NodeDef op_node_def;
  op_node_def.set_name("OP");
  op_node_def.add_input("OP/Input_1");
  op_node_def.set_op(TENSORFLOWF_NODE_OP_CONST);

  NodeDef input_node;
  input_node.set_op(TENSORFLOWF_NODE_OP_IDENTITY);
  input_node.add_input("OP/Input_1/Input_2");

  NodeDef input_2;
  input_2.set_op(TENSORFLOWF_NODE_OP_IDENTITY);

  tensorflow_parser.nodedef_map_["OP"] = &op_node_def;
  tensorflow_parser.nodedef_map_["OP/Input_1"] = &input_node;
  tensorflow_parser.nodedef_map_["OP/Input_1/Input_2"] = &input_2;

  std::string op_name = "OP/Input_1/Input_2";
  Status ret = tensorflow_parser.ConstOpNeedUpdate(op_name);
  EXPECT_EQ(ret, true);

  op_name = "OP";
  ret = tensorflow_parser.ConstOpNeedUpdate(op_name);
  EXPECT_EQ(ret, true);
}

// Exercises UppdateInputMap / UppdateOutputMap with a dropout node wired to
// two batch-norm tensors, then runs a weights Parse over tf_add.pb.
TEST_F(STestTensorflowParser, parser_UppdateInputMap_test)
{
  ge::TensorFlowModelParser tensorflow_parser;
  ScopeFusionOpInfo info;
  ge::OpNodeContext normal_op_node_context;
  ge::OpNodeContext fusion_op_node_context;

  string fusion_op_name = "dropout";
  normal_op_node_context.input_map["dropout"].push_back({0, 0});
  normal_op_node_context.input_map["conv_conv5/BatchNorm/moving_variance"].push_back({0, 1});
  normal_op_node_context.output_map["dropout"].push_back({1, 0});
  // {-1, -1} marks a control / invalid index pair -- presumably exercising
  // the out-of-range branch; confirm against UppdateOutputMap's handling.
  normal_op_node_context.output_map["conv_conv5/BatchNorm/batchnorm/add/y"].push_back({-1, -1});

  tensorflow::GraphDef *graph = new tensorflow::GraphDef();
  ScopePassManager passmanager;
  shared_ptr<ScopeGraph> scope_graph = passmanager.BuildScopeGraph(graph);
  NodeDef *node1 = graph->add_node();
  node1->set_name("dropout");
  node1->set_op(TENSORFLOWF_NODE_OP_IDENTITY);
  node1->add_input("conv_conv5/BatchNorm/moving_variance");
  node1->add_input("conv_conv5/BatchNorm/batchnorm/add/y");

  NodeDef *node2 = graph->add_node();
  node2->set_name("conv_conv5/BatchNorm/moving_variance");
  node2->set_op(TENSORFLOWF_NODE_OP_IDENTITY);

  NodeDef *node3 = graph->add_node();
  node3->set_name("conv_conv5/BatchNorm/batchnorm/add/y");
  node3->set_op(TENSORFLOWF_NODE_OP_IDENTITY);

  info.fusion_node_name = "conv_conv5/BatchNorm/batchnorm";
  info.fusion_op_type = parser::FUSIONBATCHNORM;
  info.node_name = "conv_conv5/BatchNorm/batchnorm/add";
  info.description = "";
  info.scope_pass = false;

  tensorflow_parser.nodedef_map_["dropout"] = node1;
  tensorflow_parser.nodedef_map_["conv_conv5/BatchNorm/moving_variance"] = node2;
  tensorflow_parser.nodedef_map_["conv_conv5/BatchNorm/batchnorm/add/y"] = node3;

  Status ret = tensorflow_parser.UppdateInputMap(scope_graph, info, fusion_op_node_context, normal_op_node_context);
  EXPECT_EQ(ret, domi::SUCCESS);

  // NOTE(review): 32767 looks like an implementation-specific error code
  // hard-wired into the expectation -- confirm the intended named constant.
  ret = tensorflow_parser.UppdateOutputMap(scope_graph, info, fusion_op_node_context, normal_op_node_context);
  EXPECT_EQ(ret, 32767);

  // Weights parse of the TF add model fixture is expected to succeed.
  TensorFlowWeightsParser weights_parser;
  std::string caseDir = __FILE__;
  std::size_t idx = caseDir.find_last_of("/");
  caseDir = caseDir.substr(0, idx);
  std::string proto_file = caseDir + "/origin_models/tf_add.pb";
  const char *file = proto_file.c_str();
  ge::Graph graphs;
  Status weightsRet = weights_parser.Parse(file, graphs);
  EXPECT_EQ(weightsRet, SUCCESS);
  // NOTE(review): |graph| was handed to BuildScopeGraph above; deleting it
  // here assumes the scope graph does not take ownership -- confirm.
  delete graph;
}

} // namespace ge

Loading…
Cancel
Save