From 2ca6b7fbdee73988ef71b9989f06415f17a60787 Mon Sep 17 00:00:00 2001
From: zhaodezan
Date: Fri, 12 Mar 2021 18:52:55 +0800
Subject: [PATCH] delete nodetype in model.fbs

---
 mindspore/lite/include/lite_utils.h           |  1 -
 mindspore/lite/include/model.h                |  1 +
 mindspore/lite/micro/coder/graph.cc           |  4 +-
 mindspore/lite/schema/model.fbs               | 10 +----
 mindspore/lite/src/common/utils.h             |  8 +++-
 mindspore/lite/src/lite_model.h               |  1 -
 .../src/ops/compat/attr_transfer_common.cc    |  4 +-
 mindspore/lite/src/tensor.h                   |  5 ++-
 .../test/common/import_from_meta_graphT.cc    |  2 +-
 mindspore/lite/test/st/control_flow_test.cc   | 36 +++++++++---------
 mindspore/lite/test/st/sub_graph_test.cc      | 32 ++++++++--------
 mindspore/lite/test/ut/src/infer_test.cc      | 18 ++++-----
 .../kernel/arm/fp32_grad/network_test.cc      | 30 +++++++--------
 .../kernel/arm/int8/deconv_int8_tests.cc      |  1 -
 .../runtime/kernel/arm/int8/pad_int8_tests.cc |  1 -
 mindspore/lite/test/ut/src/scheduler_test.cc  | 38 +++++++++----------
 .../fusion/constant_folding_fusion_test.cc    | 34 ++++++++---------
 .../fusion/conv_activation_fusion_test.cc     |  8 ++--
 .../fusion/conv_biasadd_fusion_test.cc        | 10 ++---
 .../optimizer/fusion/conv_bn_fusion_test.cc   | 30 +++++++--------
 .../fusion/conv_scale_fusion_test.cc          | 14 +++----
 .../lite/tools/anf_exporter/anf_exporter.cc   | 21 +++++-----
 mindspore/lite/tools/common/graph_util.cc     |  4 +-
 mindspore/lite/tools/common/tensor_util.h     |  5 ++-
 .../fusion/matmul_biasadd_fusion_pass.cc      |  2 +-
 .../fusion/mul_add_fusion_pass.cc             |  4 +-
 .../graph/batchnorm_convert_scale_pass.cc     |  4 +-
 .../legacy_optimizer/graph/switch_pass.cc     |  2 +-
 .../graph/topological_sort_pass.cc            |  2 +-
 .../parser/caffe/caffe_node_parser.cc         |  3 +-
 30 files changed, 167 insertions(+), 168 deletions(-)

diff --git a/mindspore/lite/include/lite_utils.h b/mindspore/lite/include/lite_utils.h
index 9e09613b63..cd4127aa55 100644
--- a/mindspore/lite/include/lite_utils.h
+++ b/mindspore/lite/include/lite_utils.h
@@ -38,7 +38,6 @@
 using TensorPtrVector = std::vector<tensor::MSTensor *>;
 using DeviceContextVector = std::vector<DeviceContext>;
 using Uint32Vector = std::vector<uint32_t>;
 using String = std::string;
-using NodeType = int; /**< 0 : NodeType_ValueNode, 1 : NodeType_Parameter, 2 : NodeType_CNode. */
 using AllocatorPtr = std::shared_ptr<Allocator>;
 /// \brief Set data of MSTensor from string vector.
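[Editor's note] This patch, taken together with the schema/model.fbs and src/common/utils.h hunks below, moves NodeType out of the flatbuffer schema and into plain C++: serialized tensors now carry a plain int, and call sites compare against lite::NodeType_* instead of schema::NodeType_*. The minimal sketch below shows the resulting shape of the API. The enum matches the utils.h hunk and the classifier mirrors the updated TensorCategory() in the src/tensor.h hunk, but Category and main() are stand-ins added here for illustration, and DataTypeSize() is reduced to a plain element-size parameter.

#include <cstddef>
#include <iostream>

namespace mindspore::lite {
// The enum this patch adds to src/common/utils.h; the values stay 0/1/2,
// so models serialized with the old schema enum keep their meaning.
enum NodeType {
  NodeType_ValueNode,  // const
  NodeType_Parameter,  // var
  NodeType_CNode       // op
};

// Stand-in for Tensor::Category (the real one is nested in src/tensor.h).
enum class Category { CONST_SCALAR, CONST_TENSOR, VAR };

// Mirrors the updated TensorCategory(): the first parameter is now a plain
// int, so callers no longer depend on the generated schema::NodeType type.
inline Category TensorCategory(const int node_type, const size_t shape_num, const size_t elem_size,
                               const size_t data_size) {
  return (node_type == NodeType_ValueNode)
           ? (shape_num == 0 && data_size == elem_size ? Category::CONST_SCALAR : Category::CONST_TENSOR)
           : Category::VAR;
}
}  // namespace mindspore::lite

int main() {
  using namespace mindspore::lite;
  // A dimensionless value node whose payload is exactly one element: scalar constant.
  std::cout << (TensorCategory(NodeType_ValueNode, 0, 4, 4) == Category::CONST_SCALAR) << std::endl;  // 1
  // A parameter node is always a variable tensor, whatever its payload.
  std::cout << (TensorCategory(NodeType_Parameter, 2, 4, 64) == Category::VAR) << std::endl;  // 1
  return 0;
}

Keeping the on-disk representation a plain int is what lets the `nodeType: int (deprecated)` field in the CNode table be dropped from new writers without breaking readers of older models.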
diff --git a/mindspore/lite/include/model.h b/mindspore/lite/include/model.h
index 1f73e9e963..de234a4107 100644
--- a/mindspore/lite/include/model.h
+++ b/mindspore/lite/include/model.h
@@ -17,6 +17,7 @@
 #define MINDSPORE_LITE_INCLUDE_MODEL_H_
 #include
 #include "include/lite_utils.h"
+#include "src/common/utils.h"
 namespace mindspore::lite {
 struct MS_API Model {
diff --git a/mindspore/lite/micro/coder/graph.cc b/mindspore/lite/micro/coder/graph.cc
index bec25bc076..a404136e28 100644
--- a/mindspore/lite/micro/coder/graph.cc
+++ b/mindspore/lite/micro/coder/graph.cc
@@ -59,7 +59,7 @@ int CoderGraph::ConvertTensors() {
     MS_CHECK_PTR_WITH_EXE(origin_tensor, clear_tensors());
     // tensor dims
     std::vector<int> shape;
-    if (origin_tensor->nodeType() == schema::NodeType_ValueNode) {
+    if (origin_tensor->nodeType() == NodeType_ValueNode) {
       MS_CHECK_PTR_WITH_EXE(origin_tensor->dims(), clear_tensors());
       for (uint32_t j = 0; j < origin_tensor->dims()->size(); j++) {
         MS_CHECK_PTR(origin_tensor->dims()->data());
@@ -73,7 +73,7 @@ int CoderGraph::ConvertTensors() {
     Tensor *dstTensor = new (std::nothrow)
       lite::Tensor(TypeId(origin_data_type), shape, origin_tensor->format(), TensorCategory(origin_tensor));
     MS_CHECK_PTR(dstTensor);
-    if (origin_tensor->nodeType() == schema::NodeType_ValueNode && origin_tensor->data() != nullptr &&
+    if (origin_tensor->nodeType() == NodeType_ValueNode && origin_tensor->data() != nullptr &&
         origin_tensor->data()->size() > 0) {
       if (shape.empty()) {
         shape.push_back(1);
diff --git a/mindspore/lite/schema/model.fbs b/mindspore/lite/schema/model.fbs
index 7a4a14a0c9..50fdbfcaf4 100644
--- a/mindspore/lite/schema/model.fbs
+++ b/mindspore/lite/schema/model.fbs
@@ -23,12 +23,6 @@
 file_identifier "MSL2";
 // File extension of any written files.
 file_extension "ms";
-enum NodeType: int {
-    ValueNode,   // const
-    Parameter,   // var
-    CNode        // op
-}
-
 table QuantParam {
     scale: double;
     zeroPoint: int;
@@ -45,7 +39,7 @@
 }
 table Tensor {
-    nodeType: NodeType;
+    nodeType: int;
     // data type
     dataType: int;
     // shape
@@ -73,7 +67,7 @@ table Primitive {
 table CNode {
     name: string;
-    nodeType: NodeType = CNode;
+    nodeType: int (deprecated);
     primitive: Primitive;
     inputIndex: [uint];
     outputIndex: [uint];
diff --git a/mindspore/lite/src/common/utils.h b/mindspore/lite/src/common/utils.h
index db023d6b3e..fca624997a 100644
--- a/mindspore/lite/src/common/utils.h
+++ b/mindspore/lite/src/common/utils.h
@@ -29,6 +29,12 @@
 namespace mindspore {
 namespace lite {
+enum NodeType {
+  NodeType_ValueNode,  // const
+  NodeType_Parameter,  // var
+  NodeType_CNode       // op
+};
+
 const int USEC = 1000000;
 const int MSEC = 1000;
 std::vector<std::string> StringSplit(std::string str, const std::string &pattern);
@@ -154,7 +160,7 @@ std::vector<std::string> Tokenize(const std::string &src, const std::string &del
 enum RemoveSubStrMode { PREFIX, SUFFIX, ANY };
-// remove redundant charactor
+// remove redundant character
 std::string RemoveSubStr(const std::string &from, const std::string &sub_str, RemoveSubStrMode mode = ANY);
diff --git a/mindspore/lite/src/lite_model.h b/mindspore/lite/src/lite_model.h
index 7885e27da7..d7a6447ba7 100644
--- a/mindspore/lite/src/lite_model.h
+++ b/mindspore/lite/src/lite_model.h
@@ -66,7 +66,6 @@ class LiteModel : public Model {
     node->primitive_ = c_node->primitive();
     node->quant_type_ = c_node->quantType();
     node->name_ = c_node->name()->c_str();
-    node->node_type_ = static_cast<NodeType>(c_node->nodeType());
     auto count = c_node->inputIndex()->size();
     for (uint32_t j = 0; j < count; ++j) {
       node->input_indices_.push_back(size_t(c_node->inputIndex()->template GetAs<uint32_t>(j)));
diff --git a/mindspore/lite/src/ops/compat/attr_transfer_common.cc b/mindspore/lite/src/ops/compat/attr_transfer_common.cc
index c981ba6f44..d21bd7aac1 100644
--- a/mindspore/lite/src/ops/compat/attr_transfer_common.cc
+++ b/mindspore/lite/src/ops/compat/attr_transfer_common.cc
@@ -39,8 +39,8 @@ schema::Tensor *AttrToTensor(void *data, int data_size, bool is_array, TypeId ty
   memcpy(uint8_data.data(), data, dst_tensor->Size());
   auto shape = dst_tensor->shape();
   flatbuffers::FlatBufferBuilder fbb(1024);
-  auto tensor_offset = schema::CreateTensorDirect(fbb, schema::NodeType_ValueNode, type_id, &shape, schema::Format_NHWC,
-                                                  0, 0, &uint8_data);
+  auto tensor_offset =
+    schema::CreateTensorDirect(fbb, NodeType_ValueNode, type_id, &shape, schema::Format_NHWC, 0, 0, &uint8_data);
   fbb.Finish(tensor_offset);
   delete dst_tensor;
   auto buf = fbb.GetBufferPointer();
diff --git a/mindspore/lite/src/tensor.h b/mindspore/lite/src/tensor.h
index 050fd116e0..a224d7c34e 100644
--- a/mindspore/lite/src/tensor.h
+++ b/mindspore/lite/src/tensor.h
@@ -27,6 +27,7 @@
 #include "src/common/log_adapter.h"
 #include "schema/model_generated.h"
+#include "src/common/utils.h"
 namespace mindspore {
 namespace lite {
@@ -246,9 +247,9 @@ inline size_t DataTypeSize(const TypeId type) {
   }
 }
-inline Tensor::Category TensorCategory(const schema::NodeType node_type, const size_t shape_num, const TypeId data_type,
+inline Tensor::Category TensorCategory(const int node_type, const size_t shape_num, const TypeId data_type,
                                        const size_t data_size) {
-  return (node_type == schema::NodeType::NodeType_ValueNode)
+  return (node_type == NodeType_ValueNode)
           ? (shape_num == 0 && data_size == DataTypeSize(data_type) ? Tensor::Category::CONST_SCALAR
                                                                     : Tensor::Category::CONST_TENSOR)
           : Tensor::Category::VAR;
diff --git a/mindspore/lite/test/common/import_from_meta_graphT.cc b/mindspore/lite/test/common/import_from_meta_graphT.cc
index 86a9331899..9d4af45e85 100644
--- a/mindspore/lite/test/common/import_from_meta_graphT.cc
+++ b/mindspore/lite/test/common/import_from_meta_graphT.cc
@@ -42,7 +42,7 @@ int AnfImporterFromMetaGraphT::ConverterConstTensor() {
   for (size_t i = 0; i < meta_graph_->allTensors.size(); i++) {
     auto &tensor = meta_graph_->allTensors.at(i);
     MS_ASSERT(tensor != nullptr);
-    if (tensor->nodeType != schema::NodeType::NodeType_ValueNode) {
+    if (tensor->nodeType != NodeType_ValueNode) {
      continue;
     }
     auto parameter = func_graph_->add_parameter();
diff --git a/mindspore/lite/test/st/control_flow_test.cc b/mindspore/lite/test/st/control_flow_test.cc
index 057ce463a5..3d83a1c7ad 100644
--- a/mindspore/lite/test/st/control_flow_test.cc
+++ b/mindspore/lite/test/st/control_flow_test.cc
@@ -221,7 +221,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // ------- tensor ---------
   // tensor: 0 before-add input0
   auto tensor_0 = std::make_unique<schema::TensorT>();
-  tensor_0->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_0->nodeType = lite::NodeType_ValueNode;
   tensor_0->format = schema::Format_NHWC;
   tensor_0->dataType = TypeId::kNumberTypeFloat32;
   tensor_0->dims = {1};
@@ -231,7 +231,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor: 1 before-add input1
   auto tensor_1 = std::make_unique<schema::TensorT>();
-  tensor_1->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_1->nodeType = lite::NodeType_ValueNode;
   tensor_1->format = schema::Format_NHWC;
   tensor_1->dataType = TypeId::kNumberTypeFloat32;
   tensor_1->dims = {1};
@@ -244,7 +244,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor: 2 before-add output/partial input
   auto tensor_2 = std::make_unique<schema::TensorT>();
-  tensor_2->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_2->nodeType = lite::NodeType_Parameter;
   tensor_2->format = schema::Format_NHWC;
   tensor_2->dataType = TypeId::kNumberTypeFloat32;
   tensor_2->dims = {1};
@@ -254,7 +254,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor: 3 before-add input1
   auto tensor_3 = std::make_unique<schema::TensorT>();
-  tensor_3->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_3->nodeType = lite::NodeType_ValueNode;
   tensor_3->format = schema::Format_NHWC;
   tensor_3->dataType = TypeId::kNumberTypeFloat32;
   tensor_3->dims = {1};
@@ -266,7 +266,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   MS_LOG(DEBUG) << "tensor 3";
   auto tensor_4 = std::make_unique<schema::TensorT>();
-  tensor_4->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_4->nodeType = lite::NodeType_Parameter;
   tensor_4->format = schema::Format_NHWC;
   tensor_4->dataType = TypeId::kNumberTypeFloat32;
   tensor_4->dims = {1};
@@ -276,7 +276,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor :5 partial output
   auto tensor_5 = std::make_unique<schema::TensorT>();
-  tensor_5->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_5->nodeType = lite::NodeType_Parameter;
   tensor_5->format = schema::Format_NHWC;
   tensor_5->dataType = TypeId::kNumberTypeBool;
   tensor_5->dims = {1};
@@ -286,7 +286,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor: 6 switch true output
   auto tensor_6 = std::make_unique<schema::TensorT>();
-  tensor_6->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_6->nodeType = lite::NodeType_Parameter;
   tensor_6->format = schema::Format_NHWC;
   tensor_6->dataType = TypeId::kNumberTypeFloat32;
   tensor_6->dims = {1};
@@ -296,7 +296,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor: 5 switch False output
   auto tensor_7 = std::make_unique<schema::TensorT>();
-  tensor_7->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_7->nodeType = lite::NodeType_Parameter;
   tensor_7->format = schema::Format_NHWC;
   tensor_7->dataType = TypeId::kNumberTypeFloat32;
   tensor_7->dims = {1};
@@ -306,7 +306,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor: 6 body-add input ,other input is switch true output
   auto tensor_8 = std::make_unique<schema::TensorT>();
-  tensor_8->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_8->nodeType = lite::NodeType_ValueNode;
   tensor_8->format = schema::Format_NHWC;
   tensor_8->dataType = TypeId::kNumberTypeFloat32;
   tensor_8->dims = {1};
@@ -318,7 +318,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   MS_LOG(DEBUG) << "tensor_8";
   auto tensor_9 = std::make_unique<schema::TensorT>();
-  tensor_9->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_9->nodeType = lite::NodeType_Parameter;
   tensor_9->format = schema::Format_NHWC;
   tensor_9->dataType = TypeId::kNumberTypeFloat32;
   tensor_9->dims = {1};
@@ -328,7 +328,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor: 7 after-add input ,other input is switch false output
   auto tensor_10 = std::make_unique<schema::TensorT>();
-  tensor_10->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_10->nodeType = lite::NodeType_ValueNode;
   tensor_10->format = schema::Format_NHWC;
   tensor_10->dataType = TypeId::kNumberTypeFloat32;
   tensor_10->dims = {1};
@@ -341,7 +341,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor: 8 main graph output
   auto tensor_11 = std::make_unique<schema::TensorT>();
-  tensor_11->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_11->nodeType = lite::NodeType_Parameter;
   tensor_11->format = schema::Format_NHWC;
   tensor_11->dataType = TypeId::kNumberTypeFloat32;
   tensor_11->dims = {1};
@@ -351,7 +351,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   // tensor: 9 cond-Less input, other input is tensor 2
   auto tensor_12 = std::make_unique<schema::TensorT>();
-  tensor_12->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_12->nodeType = lite::NodeType_ValueNode;
   tensor_12->format = schema::Format_NHWC;
   tensor_12->dataType = TypeId::kNumberTypeFloat32;
   tensor_12->dims = {1};
@@ -363,7 +363,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   MS_LOG(DEBUG) << "tensor_12";
   auto tensor_13 = std::make_unique<schema::TensorT>();
-  tensor_13->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_13->nodeType = lite::NodeType_ValueNode;
   tensor_13->format = schema::Format_NHWC;
   tensor_13->dataType = TypeId::kNumberTypeFloat32;
   tensor_13->dims = {1};
@@ -375,7 +375,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   MS_LOG(DEBUG) << "tensor_13";
   auto tensor_14 = std::make_unique<schema::TensorT>();
-  tensor_14->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_14->nodeType = lite::NodeType_Parameter;
   tensor_14->format = schema::Format_NHWC;
   tensor_14->dataType = TypeId::kNumberTypeFloat32;
   tensor_14->dims = {1};
@@ -384,7 +384,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   MS_LOG(DEBUG) << "tensor 14";
   auto tensor_15 = std::make_unique<schema::TensorT>();
-  tensor_15->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_15->nodeType = lite::NodeType_ValueNode;
   tensor_15->format = schema::Format_NHWC;
   tensor_15->dataType = TypeId::kNumberTypeFloat32;
   tensor_15->dims = {1};
@@ -396,7 +396,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   MS_LOG(DEBUG) << "tensor_15";
   auto tensor_16 = std::make_unique<schema::TensorT>();
-  tensor_16->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_16->nodeType = lite::NodeType_Parameter;
   tensor_16->format = schema::Format_NHWC;
   tensor_16->dataType = TypeId::kNumberTypeFloat32;
   tensor_16->dims = {1};
@@ -405,7 +405,7 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) {
   MS_LOG(DEBUG) << "tensor_16";
   auto tensor_17 = std::make_unique<schema::TensorT>();
-  tensor_17->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_17->nodeType = lite::NodeType_Parameter;
   tensor_17->format = schema::Format_NHWC;
   tensor_17->dataType = TypeId::kNumberTypeFloat32;
   tensor_17->dims = {1};
diff --git a/mindspore/lite/test/st/sub_graph_test.cc b/mindspore/lite/test/st/sub_graph_test.cc
index 0d43c17ad5..05f23512e5 100644
--- a/mindspore/lite/test/st/sub_graph_test.cc
+++ b/mindspore/lite/test/st/sub_graph_test.cc
@@ -50,12 +50,12 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   add_0->primitive->value.value = add_0_prim;
   add_0->name = "Add0";
   auto tensor_0 = std::make_unique<schema::TensorT>();
-  tensor_0->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_0->nodeType = lite::NodeType_ValueNode;
   tensor_0->format = schema::Format_NHWC;
   tensor_0->dataType = TypeId::kNumberTypeFloat32;
   tensor_0->dims = {1};
   auto tensor_1 = std::make_unique<schema::TensorT>();
-  tensor_1->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_1->nodeType = lite::NodeType_ValueNode;
   tensor_1->format = schema::Format_NHWC;
   tensor_1->dataType = TypeId::kNumberTypeFloat32;
   tensor_1->dims = {1};
@@ -64,7 +64,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   ASSERT_NE(data1, nullptr);
   data1[0] = 1;
   auto tensor_2 = std::make_unique<schema::TensorT>();
-  tensor_2->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_2->nodeType = lite::NodeType_Parameter;
   tensor_2->format = schema::Format_NHWC;
   tensor_2->dataType = TypeId::kNumberTypeFloat32;
   meta_graph->nodes.emplace_back(std::move(add_0));
@@ -83,7 +83,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   add_1->primitive->value.value = add_1_prim;
   add_1->name = "Add1";
   auto tensor_3 = std::make_unique<schema::TensorT>();
-  tensor_3->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_3->nodeType = lite::NodeType_ValueNode;
   tensor_3->format = schema::Format_NHWC;
   tensor_3->dataType = TypeId::kNumberTypeFloat32;
   tensor_3->dims = {1};
@@ -92,7 +92,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   ASSERT_NE(data3, nullptr);
   data3[0] = 1;
   auto tensor_4 = std::make_unique<schema::TensorT>();
-  tensor_4->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_4->nodeType = lite::NodeType_Parameter;
   tensor_4->format = schema::Format_NHWC;
   tensor_4->dataType = TypeId::kNumberTypeFloat32;
   meta_graph->nodes.emplace_back(std::move(add_1));
@@ -122,7 +122,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   add_5->primitive->value.value = add_5_prim;
   add_5->name = "Add5";
   auto tensor_13 = std::make_unique<schema::TensorT>();
-  tensor_13->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_13->nodeType = lite::NodeType_ValueNode;
   tensor_13->format = schema::Format_NHWC;
   tensor_13->dataType = TypeId::kNumberTypeFloat32;
   tensor_13->dims = {1};
@@ -131,7 +131,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   ASSERT_NE(data13, nullptr);
   data13[0] = 1;
   auto tensor_14 = std::make_unique<schema::TensorT>();
-  tensor_14->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_14->nodeType = lite::NodeType_Parameter;
   tensor_14->format = schema::Format_NHWC;
   tensor_14->dataType = TypeId::kNumberTypeFloat32;
   meta_graph->nodes.emplace_back(std::move(add_5));
@@ -158,7 +158,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   add_2->primitive->value.value = add_2_prim;
   add_2->name = "Add2";
   auto tensor_5 = std::make_unique<schema::TensorT>();
-  tensor_5->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_5->nodeType = lite::NodeType_ValueNode;
   tensor_5->format = schema::Format_NHWC;
   tensor_5->dataType = TypeId::kNumberTypeFloat32;
   tensor_5->dims = {1};
@@ -167,7 +167,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   ASSERT_NE(data5, nullptr);
   data5[0] = 1;
   auto tensor_6 = std::make_unique<schema::TensorT>();
-  tensor_6->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_6->nodeType = lite::NodeType_Parameter;
   tensor_6->format = schema::Format_NHWC;
   tensor_6->dataType = TypeId::kNumberTypeFloat32;
   meta_graph->nodes.emplace_back(std::move(add_2));
@@ -184,7 +184,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   less->primitive->value.value = less_prim;
   less->name = "less";
   auto tensor_15 = std::make_unique<schema::TensorT>();
-  tensor_15->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_15->nodeType = lite::NodeType_ValueNode;
   tensor_15->format = schema::Format_NHWC;
   tensor_15->dataType = TypeId::kNumberTypeFloat32;
   tensor_15->dims = {1};
@@ -193,7 +193,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   ASSERT_NE(data15, nullptr);
   data15[0] = 1;
   auto tensor_7 = std::make_unique<schema::TensorT>();
-  tensor_7->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_7->nodeType = lite::NodeType_Parameter;
   tensor_7->format = schema::Format_NHWC;
   tensor_7->dataType = TypeId::kNumberTypeFloat32;
   meta_graph->nodes.emplace_back(std::move(less));
@@ -210,11 +210,11 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   switchop->primitive->value.value = switch_prim;
   switchop->name = "switch";
   auto tensor_8 = std::make_unique<schema::TensorT>();
-  tensor_8->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_8->nodeType = lite::NodeType_Parameter;
   tensor_8->format = schema::Format_NHWC;
   tensor_8->dataType = TypeId::kNumberTypeFloat32;
   auto tensor_9 = std::make_unique<schema::TensorT>();
-  tensor_9->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_9->nodeType = lite::NodeType_Parameter;
   tensor_9->format = schema::Format_NHWC;
   tensor_9->dataType = TypeId::kNumberTypeFloat32;
   meta_graph->nodes.emplace_back(std::move(switchop));
@@ -253,7 +253,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   add_3->primitive->value.value = add_3_prim;
   add_3->name = "Add3";
   auto tensor_10 = std::make_unique<schema::TensorT>();
-  tensor_10->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_10->nodeType = lite::NodeType_ValueNode;
   tensor_10->format = schema::Format_NHWC;
   tensor_10->dataType = TypeId::kNumberTypeFloat32;
   tensor_10->dims = {1};
@@ -262,7 +262,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   ASSERT_NE(data10, nullptr);
   data10[0] = 1;
   auto tensor_11 = std::make_unique<schema::TensorT>();
-  tensor_11->nodeType = schema::NodeType::NodeType_Parameter;
+  tensor_11->nodeType = lite::NodeType_Parameter;
   tensor_11->format = schema::Format_NHWC;
   tensor_11->dataType = TypeId::kNumberTypeFloat32;
   meta_graph->nodes.emplace_back(std::move(add_3));
@@ -280,7 +280,7 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) {
   add_4->primitive->value.value = add_4_prim;
   add_4->name = "Add4";
   auto tensor_12 = std::make_unique<schema::TensorT>();
-  tensor_12->nodeType = schema::NodeType::NodeType_ValueNode;
+  tensor_12->nodeType = lite::NodeType_ValueNode;
   tensor_12->format = schema::Format_NHWC;
   tensor_12->dataType = TypeId::kNumberTypeFloat32;
   tensor_12->dims = {1};
diff --git a/mindspore/lite/test/ut/src/infer_test.cc b/mindspore/lite/test/ut/src/infer_test.cc
index 263d76c800..dbd4e54167 100644
--- a/mindspore/lite/test/ut/src/infer_test.cc
+++ b/mindspore/lite/test/ut/src/infer_test.cc
@@ -56,7 +56,7 @@ TEST_F(InferTest, TestConvNode) {
   meta_graph->outputIndex = {2};
   auto input0 = std::make_unique<schema::TensorT>();
-  input0->nodeType = schema::NodeType::NodeType_ValueNode;
+  input0->nodeType = lite::NodeType_ValueNode;
   input0->format = schema::Format_NHWC;
   input0->dataType = TypeId::kNumberTypeFloat32;
   input0->dims = {1, 28, 28, 3};
@@ -64,7 +64,7 @@ TEST_F(InferTest, TestConvNode) {
   meta_graph->allTensors.emplace_back(std::move(input0));
   auto weight = std::make_unique<schema::TensorT>();
-  weight->nodeType = schema::NodeType::NodeType_ValueNode;
+  weight->nodeType = lite::NodeType_ValueNode;
   weight->format = schema::Format_KHWC;
   weight->dataType = TypeId::kNumberTypeFloat32;
   weight->dims = {32, 3, 3, 3};
@@ -85,7 +85,7 @@ TEST_F(InferTest, TestConvNode) {
   meta_graph->allTensors.emplace_back(std::move(weight));
   auto output = std::make_unique<schema::TensorT>();
-  output->nodeType = schema::NodeType::NodeType_Parameter;
+  output->nodeType = lite::NodeType_Parameter;
   output->format = schema::Format_NHWC;
   output->dataType = TypeId::kNumberTypeFloat32;
   output->dims = {1, 28, 28, 32};
@@ -169,7 +169,7 @@ TEST_F(InferTest, TestAddNode) {
   meta_graph->outputIndex = {2};
   auto input0 = std::make_unique<schema::TensorT>();
-  input0->nodeType = schema::NodeType::NodeType_ValueNode;
+  input0->nodeType = lite::NodeType_ValueNode;
   input0->format = schema::Format_NHWC;
   input0->dataType = TypeId::kNumberTypeFloat32;
   input0->dims = {1, 28, 28, 3};
@@ -177,7 +177,7 @@ TEST_F(InferTest, TestAddNode) {
   meta_graph->allTensors.emplace_back(std::move(input0));
   auto weight = std::make_unique<schema::TensorT>();
-  weight->nodeType = schema::NodeType::NodeType_ValueNode;
+  weight->nodeType = lite::NodeType_ValueNode;
   weight->format = schema::Format_KHWC;
   weight->dataType = TypeId::kNumberTypeFloat32;
   weight->dims = {1, 28, 28, 3};
@@ -186,7 +186,7 @@ TEST_F(InferTest, TestAddNode) {
   meta_graph->allTensors.emplace_back(std::move(weight));
   auto output = std::make_unique<schema::TensorT>();
-  output->nodeType = schema::NodeType::NodeType_Parameter;
+  output->nodeType = lite::NodeType_Parameter;
   output->format = schema::Format_NHWC;
   output->dataType = TypeId::kNumberTypeFloat32;
   output->offset = -1;
@@ -260,7 +260,7 @@ TEST_F(InferTest, TestParallelExecutor) {
   meta_graph->outputIndex = {2};
   auto input0 = std::make_unique<schema::TensorT>();
-  input0->nodeType = schema::NodeType::NodeType_ValueNode;
+  input0->nodeType = lite::NodeType_ValueNode;
   input0->format = schema::Format_NHWC;
   input0->dataType = TypeId::kNumberTypeFloat32;
   input0->dims = {1, 28, 28, 3};
@@ -268,7 +268,7 @@ TEST_F(InferTest, TestParallelExecutor) {
   meta_graph->allTensors.emplace_back(std::move(input0));
   auto weight = std::make_unique<schema::TensorT>();
-  weight->nodeType = schema::NodeType::NodeType_ValueNode;
+  weight->nodeType = lite::NodeType_ValueNode;
   weight->format = schema::Format_NHWC;
   weight->dataType = TypeId::kNumberTypeFloat32;
   weight->dims = {1, 28, 28, 3};
@@ -277,7 +277,7 @@ TEST_F(InferTest, TestParallelExecutor) {
   meta_graph->allTensors.emplace_back(std::move(weight));
   auto output = std::make_unique<schema::TensorT>();
-  output->nodeType = schema::NodeType::NodeType_Parameter;
+  output->nodeType = lite::NodeType_Parameter;
   output->format = schema::Format_NHWC;
   output->dataType = TypeId::kNumberTypeFloat32;
   output->offset = -1;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc
index cf1ef8b732..7c2fd55927 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc
@@ -187,7 +187,7 @@ TEST_F(NetworkTest, tuning_layer) {
   meta_graph->outputIndex = {5, 14};
   auto input0 = std::make_unique<schema::TensorT>();
-  input0->nodeType = schema::NodeType::NodeType_ValueNode;
+  input0->nodeType = lite::NodeType_ValueNode;
   input0->format = schema::Format_NHWC;
   input0->dataType = TypeId::kNumberTypeFloat32;
   input0->dims = {BATCH_SIZE, FEATURE_SIZE};
@@ -195,7 +195,7 @@ TEST_F(NetworkTest, tuning_layer) {
   meta_graph->allTensors.emplace_back(std::move(input0));
   // tensor 1 - relu
   auto relu_out = std::make_unique<schema::TensorT>();
-  relu_out->nodeType = schema::NodeType::NodeType_Parameter;
+  relu_out->nodeType = lite::NodeType_Parameter;
   relu_out->format = schema::Format_NHWC;
   relu_out->dataType = TypeId::kNumberTypeFloat32;
   relu_out->dims = {BATCH_SIZE, FEATURE_SIZE};
@@ -203,7 +203,7 @@ TEST_F(NetworkTest, tuning_layer) {
   meta_graph->allTensors.emplace_back(std::move(relu_out));
   // tensor 2 - matmul weights
   auto weight = std::make_unique<schema::TensorT>();
-  weight->nodeType = schema::NodeType::NodeType_ValueNode;
+  weight->nodeType = lite::NodeType_ValueNode;
   weight->format = schema::Format_KHWC;
   weight->dataType = TypeId::kNumberTypeFloat32;
   weight->dims = {NUM_CLASSES, FEATURE_SIZE};
@@ -218,7 +218,7 @@ TEST_F(NetworkTest, tuning_layer) {
   delete[] buf;
   // tensor 3 - matmul
   auto input3 = std::make_unique<schema::TensorT>();
-  input3->nodeType = schema::NodeType::NodeType_Parameter;
+  input3->nodeType = lite::NodeType_Parameter;
   input3->format = schema::Format_NHWC;
   input3->dataType = TypeId::kNumberTypeFloat32;
   input3->dims = {BATCH_SIZE, NUM_CLASSES};
@@ -226,7 +226,7 @@ TEST_F(NetworkTest, tuning_layer) {
   meta_graph->allTensors.emplace_back(std::move(input3));
   // tensor 4 - fc bias
   auto bias = std::make_unique<schema::TensorT>();
-  bias->nodeType = schema::NodeType::NodeType_ValueNode;
+  bias->nodeType = lite::NodeType_ValueNode;
   bias->format = schema::Format_NHWC;
   bias->dataType = TypeId::kNumberTypeFloat32;
   bias->dims = {NUM_CLASSES};
@@ -242,7 +242,7 @@ TEST_F(NetworkTest, tuning_layer) {
   // tensor 5 - bias_add
   auto input5 = std::make_unique<schema::TensorT>();
-  input5->nodeType = schema::NodeType::NodeType_Parameter;
+  input5->nodeType = lite::NodeType_Parameter;
   input5->format = schema::Format_NHWC;
   input5->dataType = TypeId::kNumberTypeFloat32;
   input5->dims = {BATCH_SIZE, NUM_CLASSES};
@@ -251,7 +251,7 @@ TEST_F(NetworkTest, tuning_layer) {
   // tensor 6 - Label
   {
     auto label = std::make_unique<schema::TensorT>();
-    label->nodeType = schema::NodeType::NodeType_ValueNode;
+    label->nodeType = lite::NodeType_ValueNode;
     label->format = schema::Format_NHWC;
     label->dataType = TypeId::kNumberTypeFloat32;
     label->dims = {BATCH_SIZE * NUM_CLASSES};
@@ -260,7 +260,7 @@ TEST_F(NetworkTest, tuning_layer) {
   }
   // tensor 7 - Softmaxentropy
   auto input7 = std::make_unique<schema::TensorT>();
-  input7->nodeType = schema::NodeType::NodeType_Parameter;
+  input7->nodeType = lite::NodeType_Parameter;
   input7->format = schema::Format_NHWC;
   input7->dataType = TypeId::kNumberTypeFloat32;
   input7->dims = {BATCH_SIZE, NUM_CLASSES};
@@ -268,7 +268,7 @@ TEST_F(NetworkTest, tuning_layer) {
   meta_graph->allTensors.emplace_back(std::move(input7));
   // tensor 8 - biasGrad
   auto input8 = std::make_unique<schema::TensorT>();
-  input8->nodeType = schema::NodeType::NodeType_Parameter;
+  input8->nodeType = lite::NodeType_Parameter;
   input8->format = schema::Format_NHWC;
   input8->dataType = TypeId::kNumberTypeFloat32;
   input8->dims = {NUM_CLASSES};
@@ -276,7 +276,7 @@ TEST_F(NetworkTest, tuning_layer) {
   meta_graph->allTensors.emplace_back(std::move(input8));
   // tensor 9 - matmul2
   auto input9 = std::make_unique<schema::TensorT>();
-  input9->nodeType = schema::NodeType::NodeType_Parameter;
+  input9->nodeType = lite::NodeType_Parameter;
   input9->format = schema::Format_NHWC;
   input9->dataType = TypeId::kNumberTypeFloat32;
   input9->dims = {NUM_CLASSES, FEATURE_SIZE};
@@ -284,7 +284,7 @@ TEST_F(NetworkTest, tuning_layer) {
   meta_graph->allTensors.emplace_back(std::move(input9));
   // tensor 10 weights accumulate
   auto input10 = std::make_unique<schema::TensorT>();
-  input10->nodeType = schema::NodeType::NodeType_ValueNode;
+  input10->nodeType = lite::NodeType_ValueNode;
   input10->format = schema::Format_NHWC;
   input10->dataType = TypeId::kNumberTypeFloat32;
   input10->dims = {NUM_CLASSES, FEATURE_SIZE};
@@ -296,7 +296,7 @@ TEST_F(NetworkTest, tuning_layer) {
   // tensor 11 - lr
   {
     auto lr = std::make_unique<schema::TensorT>();
-    lr->nodeType = schema::NodeType::NodeType_ValueNode;
+    lr->nodeType = lite::NodeType_ValueNode;
     lr->format = schema::Format_NHWC;
     lr->dataType = TypeId::kNumberTypeFloat32;
     lr->dims = {1};
@@ -309,7 +309,7 @@ TEST_F(NetworkTest, tuning_layer) {
   // tensor 12 - momentum
   {
     auto input12 = std::make_unique<schema::TensorT>();
-    input12->nodeType = schema::NodeType::NodeType_ValueNode;
+    input12->nodeType = lite::NodeType_ValueNode;
     input12->format = schema::Format_NHWC;
     input12->dataType = TypeId::kNumberTypeFloat32;
input12->dims = {1}; @@ -321,7 +321,7 @@ TEST_F(NetworkTest, tuning_layer) { } // tensor 13 - bias accumulate auto input13 = std::make_unique(); - input13->nodeType = schema::NodeType::NodeType_ValueNode; + input13->nodeType = lite::NodeType_ValueNode; input13->format = schema::Format_NHWC; input13->dataType = TypeId::kNumberTypeFloat32; input13->dims = {NUM_CLASSES}; @@ -334,7 +334,7 @@ TEST_F(NetworkTest, tuning_layer) { // tensor 14 - loss { auto loss14 = std::make_unique(); - loss14->nodeType = schema::NodeType::NodeType_ValueNode; + loss14->nodeType = lite::NodeType_ValueNode; loss14->format = schema::Format_NHWC; loss14->dataType = TypeId::kNumberTypeFloat32; loss14->dims = {1}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc index b7250da49c..0bc0a38e2c 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc @@ -31,7 +31,6 @@ namespace mindspore { using mindspore::lite::QuantArg; using mindspore::lite::Tensor; using mindspore::schema::Format_NHWC; -using mindspore::schema::NodeType_Parameter; class TestDeconvInt8 : public mindspore::CommonTest { public: TestDeconvInt8() {} diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc index 63fffccb13..b209038119 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc @@ -26,7 +26,6 @@ namespace mindspore { using mindspore::lite::QuantArg; using mindspore::lite::Tensor; -using mindspore::schema::NodeType_Parameter; class TestPadInt8 : public mindspore::CommonTest { public: TestPadInt8() {} diff --git a/mindspore/lite/test/ut/src/scheduler_test.cc b/mindspore/lite/test/ut/src/scheduler_test.cc index f1c499616f..c7cf00de28 100644 --- a/mindspore/lite/test/ut/src/scheduler_test.cc +++ b/mindspore/lite/test/ut/src/scheduler_test.cc @@ -96,49 +96,49 @@ TEST_F(SchedulerTest, TestConstructSubGraphsTwoBranch) { concat->name = "concat"; auto tensor0 = std::make_unique(); - tensor0->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor0->nodeType = mindspore::lite::NodeType_ValueNode; tensor0->format = mindspore::schema::Format_NHWC; tensor0->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor0->dims = {1, 16, 16, 4}; tensor0->offset = -1; auto tensor1 = std::make_unique(); - tensor1->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor1->nodeType = mindspore::lite::NodeType_ValueNode; tensor1->format = mindspore::schema::Format_NHWC; tensor1->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor1->dims = {1, 16, 16, 2}; tensor1->offset = -1; auto tensor2 = std::make_unique(); - tensor2->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor2->nodeType = mindspore::lite::NodeType_ValueNode; tensor2->format = mindspore::schema::Format_NHWC; tensor2->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor2->dims = {1, 16, 16, 2}; tensor2->offset = -1; auto tensor3 = std::make_unique(); - tensor3->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor3->nodeType = mindspore::lite::NodeType_ValueNode; tensor3->format = mindspore::schema::Format_NHWC; tensor3->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor3->dims = {1, 16, 16, 2}; tensor3->offset = -1; auto tensor4 = 
std::make_unique(); - tensor4->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor4->nodeType = mindspore::lite::NodeType_ValueNode; tensor4->format = mindspore::schema::Format_NHWC; tensor4->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor4->dims = {1, 16, 16, 2}; tensor4->offset = -1; auto tensor5 = std::make_unique(); - tensor5->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor5->nodeType = mindspore::lite::NodeType_ValueNode; tensor5->format = mindspore::schema::Format_NHWC; tensor5->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor5->dims = {1, 16, 16, 2}; tensor5->offset = -1; auto tensor6 = std::make_unique(); - tensor6->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor6->nodeType = mindspore::lite::NodeType_ValueNode; tensor6->format = mindspore::schema::Format_NHWC; tensor6->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor6->dims = {1, 16, 16, 2}; tensor6->offset = -1; auto tensor7 = std::make_unique(); - tensor7->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor7->nodeType = mindspore::lite::NodeType_ValueNode; tensor7->format = mindspore::schema::Format_NHWC; tensor7->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor7->dims = {1, 16, 16, 4}; @@ -257,67 +257,67 @@ TEST_F(SchedulerTest, TestConstructSubGraphsThreeBranch) { concat->name = "concat"; auto tensor0 = std::make_unique(); - tensor0->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor0->nodeType = mindspore::lite::NodeType_ValueNode; tensor0->format = mindspore::schema::Format_NHWC; tensor0->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor0->dims = {1, 16, 16, 3}; tensor0->offset = -1; auto tensor1 = std::make_unique(); - tensor1->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor1->nodeType = mindspore::lite::NodeType_ValueNode; tensor1->format = mindspore::schema::Format_NHWC; tensor1->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor1->dims = {1, 16, 16, 1}; tensor1->offset = -1; auto tensor2 = std::make_unique(); - tensor2->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor2->nodeType = mindspore::lite::NodeType_ValueNode; tensor2->format = mindspore::schema::Format_NHWC; tensor2->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor2->dims = {1, 16, 16, 1}; tensor2->offset = -1; auto tensor3 = std::make_unique(); - tensor3->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor3->nodeType = mindspore::lite::NodeType_ValueNode; tensor3->format = mindspore::schema::Format_NHWC; tensor3->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor3->dims = {1, 16, 16, 1}; tensor3->offset = -1; auto tensor4 = std::make_unique(); - tensor4->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor4->nodeType = mindspore::lite::NodeType_ValueNode; tensor4->format = mindspore::schema::Format_NHWC; tensor4->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor4->dims = {1, 16, 16, 1}; tensor4->offset = -1; auto tensor5 = std::make_unique(); - tensor5->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor5->nodeType = mindspore::lite::NodeType_ValueNode; tensor5->format = mindspore::schema::Format_NHWC; tensor5->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor5->dims = {1, 16, 16, 1}; tensor5->offset = -1; auto tensor6 = std::make_unique(); - tensor6->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor6->nodeType = mindspore::lite::NodeType_ValueNode; tensor6->format = 
mindspore::schema::Format_NHWC; tensor6->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor6->dims = {1, 16, 16, 1}; tensor6->offset = -1; auto tensor7 = std::make_unique(); - tensor7->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor7->nodeType = mindspore::lite::NodeType_ValueNode; tensor7->format = mindspore::schema::Format_NHWC; tensor7->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor7->dims = {1, 16, 16, 1}; tensor7->offset = -1; auto tensor8 = std::make_unique(); - tensor8->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor8->nodeType = mindspore::lite::NodeType_ValueNode; tensor8->format = mindspore::schema::Format_NHWC; tensor8->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor8->dims = {1, 16, 16, 1}; tensor8->offset = -1; auto tensor9 = std::make_unique(); - tensor9->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor9->nodeType = mindspore::lite::NodeType_ValueNode; tensor9->format = mindspore::schema::Format_NHWC; tensor9->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor9->dims = {1, 16, 16, 1}; tensor9->offset = -1; auto tensor10 = std::make_unique(); - tensor10->nodeType = mindspore::schema::NodeType::NodeType_ValueNode; + tensor10->nodeType = mindspore::lite::NodeType_ValueNode; tensor10->format = mindspore::schema::Format_NHWC; tensor10->dataType = mindspore::TypeId::kNumberTypeFloat32; tensor10->dims = {1, 16, 16, 3}; diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc index 67e899eb82..2451a4bb1d 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc @@ -56,7 +56,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType op_type, void *op_node) { // input 0: data1 auto input0 = std::make_unique(); - input0->nodeType = schema::NodeType::NodeType_ValueNode; + input0->nodeType = lite::NodeType_ValueNode; input0->format = schema::Format_NHWC; input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 2, 2, 3}; @@ -72,7 +72,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType op_type, void *op_node) { // input 1: data2 auto input1 = std::make_unique(); - input1->nodeType = schema::NodeType::NodeType_ValueNode; + input1->nodeType = lite::NodeType_ValueNode; input1->format = schema::Format_NHWC; input1->dataType = TypeId::kNumberTypeFloat32; input1->dims = {1, 2, 2, 3}; @@ -88,7 +88,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType op_type, void *op_node) { // final add output auto add_output = std::make_unique(); - add_output->nodeType = schema::NodeType::NodeType_Parameter; + add_output->nodeType = lite::NodeType_Parameter; add_output->format = schema::Format_NHWC; add_output->dataType = TypeId::kNumberTypeFloat32; add_output->dims = {1, 2, 2, 3}; @@ -115,7 +115,7 @@ MetaGraphTptr BuildGraphForOneInput(schema::PrimitiveType op_type, void *op_node // input 0: data1 auto input0 = std::make_unique(); - input0->nodeType = schema::NodeType::NodeType_ValueNode; + input0->nodeType = lite::NodeType_ValueNode; input0->format = schema::Format_NHWC; input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 2, 2, 3}; @@ -131,7 +131,7 @@ MetaGraphTptr BuildGraphForOneInput(schema::PrimitiveType op_type, void *op_node // final add output auto add_output = std::make_unique(); - add_output->nodeType = schema::NodeType::NodeType_Parameter; + add_output->nodeType = 
lite::NodeType_Parameter; add_output->format = schema::Format_NHWC; add_output->dataType = TypeId::kNumberTypeFloat32; add_output->dims = {1, 2, 2, 3}; @@ -168,7 +168,7 @@ MetaGraphTptr BuildMixGraph() { // input 0: data1 auto input0 = std::make_unique(); - input0->nodeType = schema::NodeType::NodeType_ValueNode; + input0->nodeType = lite::NodeType_ValueNode; input0->format = schema::Format_NHWC; input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 2, 2, 3}; @@ -184,7 +184,7 @@ MetaGraphTptr BuildMixGraph() { // input 1: data2 auto input1 = std::make_unique(); - input1->nodeType = schema::NodeType::NodeType_ValueNode; + input1->nodeType = lite::NodeType_ValueNode; input1->format = schema::Format_NHWC; input1->dataType = TypeId::kNumberTypeFloat32; input1->dims = {1, 2, 2, 3}; @@ -200,7 +200,7 @@ MetaGraphTptr BuildMixGraph() { // addoutput auto add_output = std::make_unique(); - add_output->nodeType = schema::NodeType::NodeType_Parameter; + add_output->nodeType = lite::NodeType_Parameter; add_output->format = schema::Format_NHWC; add_output->dataType = TypeId::kNumberTypeFloat32; add_output->dims = {1, 2, 2, 3}; @@ -213,7 +213,7 @@ MetaGraphTptr BuildMixGraph() { // input 2: data3 auto input2 = std::make_unique(); - input2->nodeType = schema::NodeType::NodeType_ValueNode; + input2->nodeType = lite::NodeType_ValueNode; input2->format = schema::Format_NHWC; input2->dataType = TypeId::kNumberTypeFloat32; input2->dims = {1, 2, 2, 3}; @@ -229,7 +229,7 @@ MetaGraphTptr BuildMixGraph() { // final mul output auto mul_output = std::make_unique(); - mul_output->nodeType = schema::NodeType::NodeType_Parameter; + mul_output->nodeType = lite::NodeType_Parameter; mul_output->format = schema::Format_NHWC; mul_output->dataType = TypeId::kNumberTypeFloat32; mul_output->dims = {1, 2, 2, 3}; @@ -278,7 +278,7 @@ MetaGraphTptr BuildSplitGraph() { // input 0: data1 auto input0 = std::make_unique(); - input0->nodeType = schema::NodeType::NodeType_ValueNode; + input0->nodeType = lite::NodeType_ValueNode; input0->format = schema::Format_NHWC; input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 2, 2, 3}; @@ -294,7 +294,7 @@ MetaGraphTptr BuildSplitGraph() { // split output1 auto split_output1 = std::make_unique(); - split_output1->nodeType = schema::NodeType::NodeType_Parameter; + split_output1->nodeType = lite::NodeType_Parameter; split_output1->format = schema::Format_NHWC; split_output1->dataType = TypeId::kNumberTypeFloat32; split_output1->dims = {1, 1, 2, 3}; @@ -307,7 +307,7 @@ MetaGraphTptr BuildSplitGraph() { // split output2 auto split_output2 = std::make_unique(); - split_output2->nodeType = schema::NodeType::NodeType_Parameter; + split_output2->nodeType = lite::NodeType_Parameter; split_output2->format = schema::Format_NHWC; split_output2->dataType = TypeId::kNumberTypeFloat32; split_output2->dims = {1, 1, 2, 3}; @@ -320,7 +320,7 @@ MetaGraphTptr BuildSplitGraph() { // input 1: data2 auto input1 = std::make_unique(); - input1->nodeType = schema::NodeType::NodeType_ValueNode; + input1->nodeType = lite::NodeType_ValueNode; input1->format = schema::Format_NHWC; input1->dataType = TypeId::kNumberTypeFloat32; input1->dims = {1, 1, 2, 3}; @@ -336,7 +336,7 @@ MetaGraphTptr BuildSplitGraph() { // input 2: data3 auto input2 = std::make_unique(); - input2->nodeType = schema::NodeType::NodeType_ValueNode; + input2->nodeType = lite::NodeType_ValueNode; input2->format = schema::Format_NHWC; input2->dataType = TypeId::kNumberTypeFloat32; input2->dims = {1, 1, 2, 3}; @@ -352,7 +352,7 @@ 
MetaGraphTptr BuildSplitGraph() { // final mul output1 auto mul_output = std::make_unique(); - mul_output->nodeType = schema::NodeType::NodeType_Parameter; + mul_output->nodeType = lite::NodeType_Parameter; mul_output->format = schema::Format_NHWC; mul_output->dataType = TypeId::kNumberTypeFloat32; mul_output->dims = {1, 1, 2, 3}; @@ -360,7 +360,7 @@ MetaGraphTptr BuildSplitGraph() { // final mul output2 auto mul_output2 = std::make_unique(); - mul_output2->nodeType = schema::NodeType::NodeType_Parameter; + mul_output2->nodeType = lite::NodeType_Parameter; mul_output2->format = schema::Format_NHWC; mul_output2->dataType = TypeId::kNumberTypeFloat32; mul_output2->dims = {1, 1, 2, 3}; diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc index bd881ec986..45abc045c0 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc @@ -100,7 +100,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::ActivationType // input 0: data auto input0 = std::make_unique(); - input0->nodeType = schema::NodeType::NodeType_ValueNode; + input0->nodeType = lite::NodeType_ValueNode; input0->format = schema::Format_NHWC; input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 5, 5, 3}; @@ -109,7 +109,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::ActivationType // input 1: weight auto input1 = std::make_unique(); - input1->nodeType = schema::NodeType::NodeType_ValueNode; + input1->nodeType = lite::NodeType_ValueNode; input1->format = schema::Format_KHWC; input1->dataType = TypeId::kNumberTypeFloat32; input1->dims = {8, 3, 3, 3}; @@ -118,7 +118,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::ActivationType // conv output auto conv_output = std::make_unique(); - conv_output->nodeType = schema::NodeType::NodeType_Parameter; + conv_output->nodeType = lite::NodeType_Parameter; conv_output->format = schema::Format_NHWC; conv_output->dataType = TypeId::kNumberTypeFloat32; conv_output->dims = {1, 5, 5, 8}; @@ -126,7 +126,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::ActivationType // final output auto output = std::make_unique(); - output->nodeType = schema::NodeType::NodeType_Parameter; + output->nodeType = lite::NodeType_Parameter; output->format = schema::Format_NHWC; output->dataType = TypeId::kNumberTypeFloat32; output->dims = {1, 5, 5, 8}; diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc index 74b30b4904..ea729b6d6f 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc @@ -100,7 +100,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::PrimitiveType // input 0: data auto input0 = std::make_unique(); - input0->nodeType = schema::NodeType::NodeType_ValueNode; + input0->nodeType = lite::NodeType_ValueNode; input0->format = schema::Format_NHWC; input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 5, 5, 3}; @@ -109,7 +109,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::PrimitiveType // input 1: weight auto input1 = std::make_unique(); - input1->nodeType = schema::NodeType::NodeType_ValueNode; + input1->nodeType = 
lite::NodeType_ValueNode; input1->format = schema::Format_KHWC; input1->dataType = TypeId::kNumberTypeFloat32; input1->dims = {8, 3, 3, 3}; @@ -118,7 +118,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::PrimitiveType // conv output auto conv_output = std::make_unique(); - conv_output->nodeType = schema::NodeType::NodeType_Parameter; + conv_output->nodeType = lite::NodeType_Parameter; conv_output->format = schema::Format_NHWC; conv_output->dataType = TypeId::kNumberTypeFloat32; conv_output->dims = {1, 5, 5, 8}; @@ -126,7 +126,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::PrimitiveType // input2: bias auto input2 = std::make_unique(); - input2->nodeType = schema::NodeType::NodeType_ValueNode; + input2->nodeType = lite::NodeType_ValueNode; input2->format = schema::Format_NHWC; input2->dataType = TypeId::kNumberTypeFloat32; input2->dims = {1, 5, 5, 8}; @@ -135,7 +135,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::PrimitiveType // final output auto output = std::make_unique(); - output->nodeType = schema::NodeType::NodeType_Parameter; + output->nodeType = lite::NodeType_Parameter; output->format = schema::Format_NHWC; output->dataType = TypeId::kNumberTypeFloat32; output->dims = {1, 5, 5, 8}; diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc index 3922f13822..5c2c9aa5d3 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc @@ -98,7 +98,7 @@ MetaGraphTptr BuildCaffeGraph(schema::PrimitiveType conv_type) { // input 0: data auto input0 = std::make_unique(); - input0->nodeType = schema::NodeType::NodeType_ValueNode; + input0->nodeType = lite::NodeType_ValueNode; input0->format = schema::Format_NHWC; input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 5, 5, 3}; @@ -107,7 +107,7 @@ MetaGraphTptr BuildCaffeGraph(schema::PrimitiveType conv_type) { // input 1: weight auto input1 = std::make_unique(); - input1->nodeType = schema::NodeType::NodeType_ValueNode; + input1->nodeType = lite::NodeType_ValueNode; input1->format = schema::Format_KHWC; input1->dataType = TypeId::kNumberTypeFloat32; input1->dims = {8, 3, 3, 3}; @@ -116,7 +116,7 @@ MetaGraphTptr BuildCaffeGraph(schema::PrimitiveType conv_type) { // conv output auto conv_output = std::make_unique(); - conv_output->nodeType = schema::NodeType::NodeType_Parameter; + conv_output->nodeType = lite::NodeType_Parameter; conv_output->format = schema::Format_NHWC; conv_output->dataType = TypeId::kNumberTypeFloat32; conv_output->dims = {1, 5, 5, 8}; @@ -124,7 +124,7 @@ MetaGraphTptr BuildCaffeGraph(schema::PrimitiveType conv_type) { // caffe bn : mean auto input2 = std::make_unique(); - input2->nodeType = schema::NodeType::NodeType_ValueNode; + input2->nodeType = lite::NodeType_ValueNode; input2->format = schema::Format_NHWC; input2->dataType = TypeId::kNumberTypeFloat32; input2->dims = {1, 5, 5, 8}; @@ -133,7 +133,7 @@ MetaGraphTptr BuildCaffeGraph(schema::PrimitiveType conv_type) { // caffe bn : var auto input3 = std::make_unique(); - input3->nodeType = schema::NodeType::NodeType_ValueNode; + input3->nodeType = lite::NodeType_ValueNode; input3->format = schema::Format_NHWC; input3->dataType = TypeId::kNumberTypeFloat32; input3->dims = {1, 5, 5, 8}; @@ -142,7 +142,7 @@ MetaGraphTptr BuildCaffeGraph(schema::PrimitiveType conv_type) { // final bn output auto output = 
std::make_unique(); - output->nodeType = schema::NodeType::NodeType_Parameter; + output->nodeType = lite::NodeType_Parameter; output->format = schema::Format_NHWC; output->dataType = TypeId::kNumberTypeFloat32; output->dims = {1, 5, 5, 8}; @@ -179,7 +179,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { // input 0: data auto input0 = std::make_unique(); - input0->nodeType = schema::NodeType::NodeType_ValueNode; + input0->nodeType = lite::NodeType_ValueNode; input0->format = schema::Format_NHWC; input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 5, 5, 3}; @@ -188,7 +188,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { // input 1: conv_bias auto input11 = std::make_unique(); - input11->nodeType = schema::NodeType::NodeType_ValueNode; + input11->nodeType = lite::NodeType_ValueNode; input11->format = schema::Format_KHWC; input11->dataType = TypeId::kNumberTypeFloat32; input11->dims = {8, 3, 3, 3}; @@ -197,7 +197,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { // input 1: weight auto input1 = std::make_unique(); - input1->nodeType = schema::NodeType::NodeType_ValueNode; + input1->nodeType = lite::NodeType_ValueNode; input1->format = schema::Format_KHWC; input1->dataType = TypeId::kNumberTypeFloat32; input1->dims = {8, 3, 3, 3}; @@ -206,7 +206,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { // conv output auto conv_output = std::make_unique(); - conv_output->nodeType = schema::NodeType::NodeType_Parameter; + conv_output->nodeType = lite::NodeType_Parameter; conv_output->format = schema::Format_NHWC; conv_output->dataType = TypeId::kNumberTypeFloat32; conv_output->dims = {1, 5, 5, 8}; @@ -214,7 +214,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { // tflite bn : scale auto input2 = std::make_unique(); - input2->nodeType = schema::NodeType::NodeType_ValueNode; + input2->nodeType = lite::NodeType_ValueNode; input2->format = schema::Format_NHWC; input2->dataType = TypeId::kNumberTypeFloat32; input2->dims = {1, 5, 5, 8}; @@ -223,7 +223,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { // tflite bn : bias auto input3 = std::make_unique(); - input3->nodeType = schema::NodeType::NodeType_ValueNode; + input3->nodeType = lite::NodeType_ValueNode; input3->format = schema::Format_NHWC; input3->dataType = TypeId::kNumberTypeFloat32; input3->dims = {1, 5, 5, 8}; @@ -232,7 +232,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { // tflite bn : mean auto input4 = std::make_unique(); - input4->nodeType = schema::NodeType::NodeType_ValueNode; + input4->nodeType = lite::NodeType_ValueNode; input4->format = schema::Format_NHWC; input4->dataType = TypeId::kNumberTypeFloat32; input4->dims = {1, 5, 5, 8}; @@ -241,7 +241,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { // tflite bn : var auto input5 = std::make_unique(); - input5->nodeType = schema::NodeType::NodeType_ValueNode; + input5->nodeType = lite::NodeType_ValueNode; input5->format = schema::Format_NHWC; input5->dataType = TypeId::kNumberTypeFloat32; input5->dims = {1, 5, 5, 8}; @@ -250,7 +250,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { // final output auto output = std::make_unique(); - output->nodeType = schema::NodeType::NodeType_Parameter; + output->nodeType = lite::NodeType_Parameter; output->format = schema::Format_NHWC; output->dataType = TypeId::kNumberTypeFloat32; output->dims = {1, 5, 5, 8}; diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc 
b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc index d72650698a..7ddaedc3f8 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc @@ -115,7 +115,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { // input 0: data auto input0 = std::make_unique(); - input0->nodeType = schema::NodeType::NodeType_ValueNode; + input0->nodeType = lite::NodeType_ValueNode; input0->format = schema::Format_NHWC; input0->dataType = TypeId::kNumberTypeFloat32; input0->dims = {1, 5, 5, 3}; @@ -124,7 +124,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { // input 1: weight auto input1 = std::make_unique(); - input1->nodeType = schema::NodeType::NodeType_ValueNode; + input1->nodeType = lite::NodeType_ValueNode; input1->format = schema::Format_KHWC; input1->dataType = TypeId::kNumberTypeFloat32; input1->dims = {8, 3, 3, 3}; @@ -134,7 +134,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { if (conv_with_bias) { // input 00: bias auto input00 = std::make_unique(); - input00->nodeType = schema::NodeType::NodeType_ValueNode; + input00->nodeType = lite::NodeType_ValueNode; input00->format = schema::Format_NHWC; input00->dataType = TypeId::kNumberTypeFloat32; input00->dims = {1, 5, 5, 3}; @@ -144,7 +144,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { // conv output auto conv_output = std::make_unique(); - conv_output->nodeType = schema::NodeType::NodeType_Parameter; + conv_output->nodeType = lite::NodeType_Parameter; conv_output->format = schema::Format_NHWC; conv_output->dataType = TypeId::kNumberTypeFloat32; conv_output->dims = {1, 5, 5, 8}; @@ -152,7 +152,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { // scale weight input auto input2 = std::make_unique(); - input2->nodeType = schema::NodeType::NodeType_ValueNode; + input2->nodeType = lite::NodeType_ValueNode; input2->format = schema::Format_NHWC; input2->dataType = TypeId::kNumberTypeFloat32; input2->dims = {1, 5, 5, 8}; @@ -161,7 +161,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { // scale bias input auto input3 = std::make_unique(); - input3->nodeType = schema::NodeType::NodeType_ValueNode; + input3->nodeType = lite::NodeType_ValueNode; input3->format = schema::Format_NHWC; input3->dataType = TypeId::kNumberTypeFloat32; input3->dims = {1, 5, 5, 8}; @@ -170,7 +170,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { // final scale output auto output = std::make_unique(); - output->nodeType = schema::NodeType::NodeType_Parameter; + output->nodeType = lite::NodeType_Parameter; output->format = schema::Format_NHWC; output->dataType = TypeId::kNumberTypeFloat32; output->dims = {1, 5, 5, 8}; diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.cc b/mindspore/lite/tools/anf_exporter/anf_exporter.cc index 7a7207c9c7..93a7f0ef9c 100644 --- a/mindspore/lite/tools/anf_exporter/anf_exporter.cc +++ b/mindspore/lite/tools/anf_exporter/anf_exporter.cc @@ -268,8 +268,8 @@ int AnfExporter::SetGraphInputIndex(const std::unique_ptr &m for (auto &node : subgraph_input_nodes) { for (auto input : node->inputIndex) { auto tensor = meta_graphT->allTensors[input].get(); - if (tensor->nodeType != schema::NodeType_CNode && tensor->data.empty()) { - tensor->nodeType = schema::NodeType_ValueNode; + if 
+        tensor->nodeType = NodeType_ValueNode;
         tensor->format = schema::Format_NHWC;
         if (!IsContain(subgraph->inputIndices, input)) {
           if (subgraph_index == kMainGraphIndex) {
@@ -386,7 +386,6 @@ int AnfExporter::Anf2Fb(const FuncGraphPtr &func_graph, const std::unique_ptr<sc
     if (primT == nullptr) {
       primT = GetPrimitiveT(cnode->input(0));
     }
-    node->nodeType = schema::NodeType_CNode;
     node->name = cnode->fullname_with_scope();
     node->primitive = std::unique_ptr<schema::PrimitiveT>(primT);
     ret = SetOpInputNode(cnode, meta_graphT, node.get());
@@ -622,7 +621,7 @@ int AnfExporter::ProcessTensor(const ValueNodePtr &valueNode, std::unique_ptr<sc
                        [](const int64_t &value) { return static_cast<int32_t>(value); });
   (*paramTensor)->dims = dims;
   if (train_flag && (*paramTensor)->dims.empty()) (*paramTensor)->dims = {1};
-  (*paramTensor)->nodeType = schema::NodeType::NodeType_ValueNode;
+  (*paramTensor)->nodeType = NodeType_ValueNode;
   auto data = value->cast<tensor::TensorPtr>();
   (*paramTensor)->data.resize(data->Size());
   ret = memcpy_s((*paramTensor)->data.data(), data->Size(), data->data_c(), data->Size());
@@ -642,7 +641,7 @@ int AnfExporter::ProcessInt32OrInt64Imm(const ValueNodePtr &valueNode, std::uniq
   // data of int64 is converted to int32 here.
   (*paramTensor)->dataType = kNumberTypeInt32;
   (*paramTensor)->dims = {1};
-  (*paramTensor)->nodeType = schema::NodeType::NodeType_ValueNode;
+  (*paramTensor)->nodeType = NodeType_ValueNode;
   int real_data = opt::CastToInt(value).front();
   (*paramTensor)->data.resize(sizeof(int32_t));
   ret = memcpy_s((*paramTensor)->data.data(), sizeof(int32_t), &real_data, sizeof(int32_t));
@@ -663,7 +662,7 @@ void AnfExporter::ProcessBoolImm(const ValueNodePtr &valueNode, std::unique_ptr<
   auto typePtr = abstractScalar->GetTypeTrack();
   (*paramTensor)->dataType = typePtr->type_id();
   (*paramTensor)->dims = {1};
-  (*paramTensor)->nodeType = schema::NodeType_ValueNode;
+  (*paramTensor)->nodeType = NodeType_ValueNode;
   auto data = value->cast<mindspore::BoolImmPtr>();
   (*paramTensor)->data.emplace_back(data->value());
   node_id_map_[valueNode->fullname_with_scope()] = meta_graphT->allTensors.size();
@@ -681,7 +680,7 @@ int AnfExporter::ProcessNumber(const ValueNodePtr &valueNode, schema::TensorT *p
   }
   paramTensor->dataType = kNumberTypeInt32;
   paramTensor->dims = {1};
-  paramTensor->nodeType = schema::NodeType_ValueNode;
+  paramTensor->nodeType = NodeType_ValueNode;
   node_id_map_[valueNode->fullname_with_scope()] = meta_graphT->allTensors.size();
   output_cnode->inputIndex.emplace_back(meta_graphT->allTensors.size());
   meta_graphT->allTensors.emplace_back(paramTensor);
@@ -691,7 +690,7 @@ void AnfExporter::ProcessInt(const ValueNodePtr &valueNode, std::unique_ptr<sche
                              const std::unique_ptr<schema::MetaGraphT> &meta_graphT) {
   (*paramTensor)->dataType = kNumberTypeInt32;
   (*paramTensor)->dims = {1};
-  (*paramTensor)->nodeType = schema::NodeType_ValueNode;
+  (*paramTensor)->nodeType = NodeType_ValueNode;
   (*paramTensor)->data.emplace_back(kNumberTypeInt32);
   node_id_map_[valueNode->fullname_with_scope()] = meta_graphT->allTensors.size();
   output_cnode->inputIndex.emplace_back(meta_graphT->allTensors.size());
@@ -721,7 +720,7 @@ int AnfExporter::ProcessValueSequence(const ValueNodePtr &valueNode, std::unique
   }
   (*paramTensor)->dataType = kNumberTypeInt32;
   (*paramTensor)->dims = {static_cast<int32_t>(shape.size())};
-  (*paramTensor)->nodeType = schema::NodeType_ValueNode;
+  (*paramTensor)->nodeType = NodeType_ValueNode;
   (*paramTensor)->data.resize(shape.size() * sizeof(int));
   ret = memcpy_s((*paramTensor)->data.data(), shape.size() * sizeof(int32_t), shape.data(),
                  shape.size() * sizeof(int32_t));
@@ -862,7 +861,7 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<s
        MS_LOG(ERROR) << "new msTensor failed";
        return;
      }
-      msTensor->nodeType = schema::NodeType_CNode;
+      msTensor->nodeType = NodeType_CNode;
       fb_node->outputIndex.emplace_back(meta_graphT->allTensors.size());
       if (train_flag) {
         std::string name = cnode_name + "_o:" + std::to_string(i);
@@ -912,7 +911,7 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<s
       type = typePtr->type_id();
     }
     ms_tensor->dataType = type;
-    ms_tensor->nodeType = schema::NodeType_CNode;
+    ms_tensor->nodeType = NodeType_CNode;
     ms_tensor->name = cnode_name;
     fb_node->outputIndex.emplace_back(meta_graphT->allTensors.size());
     node_id_map_[cnode_name] = meta_graphT->allTensors.size();
diff --git a/mindspore/lite/tools/common/graph_util.cc b/mindspore/lite/tools/common/graph_util.cc
index 52182389f2..a99568c60c 100644
--- a/mindspore/lite/tools/common/graph_util.cc
+++ b/mindspore/lite/tools/common/graph_util.cc
@@ -445,7 +445,7 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si
      MS_LOG(ERROR) << "Copy Tensor failed";
      return graphT->nodes.end();
    }
-    toAddTensor->nodeType = schema::NodeType_CNode;
+    toAddTensor->nodeType = NodeType_CNode;
    toAddTensor->refCount = 0;
    toAddTensor->data.clear();
    MS_ASSERT(toAddNodeIn->primitive != nullptr);
@@ -517,7 +517,7 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz
      *errorCode = RET_NULL_PTR;
      return graphT->nodes.end();
    }
-    toAddTensor->nodeType = schema::NodeType_CNode;
+    toAddTensor->nodeType = NodeType_CNode;
    MS_ASSERT(toAddNodeIn->primitive != nullptr);
    if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) {
      auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast();
diff --git a/mindspore/lite/tools/common/tensor_util.h b/mindspore/lite/tools/common/tensor_util.h
index 0232e45c23..9db927308b 100644
--- a/mindspore/lite/tools/common/tensor_util.h
+++ b/mindspore/lite/tools/common/tensor_util.h
@@ -26,6 +26,7 @@
 #include "schema/inner/model_generated.h"
 #include "src/common/log_adapter.h"
 #include "ir/dtype/type_id.h"
+#include "src/common/utils.h"
 
 namespace mindspore {
 namespace lite {
@@ -69,9 +70,9 @@ class TensorCache {
     index++;
     if (Category == CONST || Category == TF_CONST || Category == GRAPH_INPUT) {
       tensor->refCount = 1;
-      tensor->nodeType = schema::NodeType_ValueNode;
+      tensor->nodeType = NodeType_ValueNode;
     } else {
-      tensor->nodeType = schema::NodeType_Parameter;
+      tensor->nodeType = NodeType_Parameter;
     }
     tensor->name = name;
     tensors.push_back(tensor);
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc
index 85bae4edf8..3336a5a476 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc
@@ -80,7 +80,7 @@ STATUS MatMulBiasAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &p
   MS_ASSERT(graph->allTensors.size() > baNodeInputIndex.at(BIASADD_OP_BIAS_INDEX));
   const auto &baNodeBiasTensor = graph->allTensors.at(baNodeInputIndex.at(BIASADD_OP_BIAS_INDEX));
   MS_ASSERT(baNodeBiasTensor != nullptr);
-  if (baNodeBiasTensor->refCount != schema::NodeType::NodeType_ValueNode) {
+  if (baNodeBiasTensor->refCount != NodeType_ValueNode) {
     // dont fusion, return
     return RET_OK;
   }
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc
index d72bd29005..163bf2778d 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc
@@ -94,7 +94,7 @@ STATUS MulAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &patternN
   MS_ASSERT(graph->allTensors.size() > mulNodeInputIndex.at(MUL_OP_BIAS_INDEX));
   const auto &mulNodeBiasTensor = graph->allTensors.at(mulNodeInputIndex.at(MUL_OP_BIAS_INDEX));
   MS_ASSERT(mulNodeBiasTensor != nullptr);
-  if (mulNodeBiasTensor->nodeType != schema::NodeType::NodeType_ValueNode) {
+  if (mulNodeBiasTensor->nodeType != NodeType_ValueNode) {
     // dont fusion, return
     return RET_OK;
   }
@@ -111,7 +111,7 @@ STATUS MulAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &patternN
   MS_ASSERT(graph->allTensors.size() > addNodeInputIndex.at(ADD_OP_BIAS_INDEX));
   const auto &addNodeBiasTensor = graph->allTensors.at(addNodeInputIndex.at(ADD_OP_BIAS_INDEX));
   MS_ASSERT(addNodeBiasTensor != nullptr);
-  if (addNodeBiasTensor->nodeType != schema::NodeType::NodeType_ValueNode) {
+  if (addNodeBiasTensor->nodeType != NodeType_ValueNode) {
     // dont fusion, return
     return RET_OK;
   }
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc
index 9606195af2..573a05739a 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc
@@ -108,7 +108,7 @@ STATUS BatchNormConvertScalePass::GenNewScaleTensor(MetaGraphT *graph, const std
   }
   newScaleWeightTensor->dataType = bnMeanTensor->dataType;
   newScaleWeightTensor->format = bnMeanTensor->format;
-  newScaleWeightTensor->refCount = schema::NodeType::NodeType_ValueNode;
+  newScaleWeightTensor->refCount = NodeType_ValueNode;
   newScaleWeightTensor->dims = bnMeanTensor->dims;
   auto weightShapeSize = GetShapeSize(*bnMeanTensor);
   newScaleWeightTensor->data.resize(weightShapeSize * sizeof(float));
@@ -131,7 +131,7 @@ STATUS BatchNormConvertScalePass::GenNewScaleTensor(MetaGraphT *graph, const std
 
   newScaleBiasTensor->dataType = bnMeanTensor->dataType;
   newScaleBiasTensor->format = bnMeanTensor->format;
-  newScaleBiasTensor->refCount = schema::NodeType::NodeType_ValueNode;
+  newScaleBiasTensor->refCount = NodeType_ValueNode;
   newScaleBiasTensor->dims = bnMeanTensor->dims;
   weightShapeSize = GetShapeSize(*bnMeanTensor);
   newScaleBiasTensor->data.resize(weightShapeSize * sizeof(float));
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc
index f5966043d4..9bd89ea7a2 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc
@@ -215,7 +215,7 @@ std::unique_ptr<schema::CNodeT> SingleSwitchPass::MakeMergeNode(const std::strin
       merge_node->inputIndex.push_back(old_merge_input[i]);
     } else {
       auto tensor = NewTensor(in_tensor);
-      tensor->nodeType = schema::NodeType_CNode;
+      tensor->nodeType = NodeType_CNode;
       graph_->allTensors.push_back(std::move(tensor));
       merge_node->inputIndex.push_back(graph_->allTensors.size() - 1);
     }
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc
index 1e875ec8a6..ff891c9b0f 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc
@@ -31,7 +31,7 @@ STATUS TopologicalSortPass::Run(schema::MetaGraphT *graph) {
   std::vector<size_t> sinked_tensor_idxes;
   // put all const tensor index into sinked_tensor_idxes
   for (size_t i = 0; i < graph->allTensors.size(); i++) {
-    if (graph->allTensors.at(i)->nodeType == schema::NodeType::NodeType_ValueNode) {
+    if (graph->allTensors.at(i)->nodeType == NodeType_ValueNode) {
      sinked_tensor_idxes.insert(sinked_tensor_idxes.end(), i);
    }
  }
diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc
index 15bf3e32d2..3eb8b4b7e8 100644
--- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc
+++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc
@@ -18,6 +18,7 @@
 #include
 #include "securec/include/securec.h"
 #include "ir/dtype/type_id.h"
+#include "src/common/utils.h"
 
 namespace mindspore {
 namespace lite {
@@ -33,7 +34,7 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
   ConvertShape(proto, &shapeVec);
   weight->dims = shapeVec;
   weight->dataType = kNumberTypeFloat32;
-  weight->nodeType = schema::NodeType::NodeType_ValueNode;
+  weight->nodeType = NodeType_ValueNode;
 
   // cal Weight num
   int count = 1;
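
Note (editor's illustration, not part of the patch): after this change, call sites compare a tensor's nodeType against the plain lite::NodeType enum from src/common/utils.h rather than the generated schema::NodeType, and the schema stores the field as a plain int with unchanged values (ValueNode = 0, Parameter = 1, CNode = 2), so already-converted .ms files keep their meaning. A minimal sketch of a post-patch call site; the helper name IsConstTensor is hypothetical:

    #include "schema/inner/model_generated.h"  // schema::TensorT; nodeType is now a plain int
    #include "src/common/utils.h"              // lite::NodeType_{ValueNode, Parameter, CNode}

    namespace mindspore::lite {
    // A const tensor is tagged NodeType_ValueNode and carries its payload inline.
    bool IsConstTensor(const schema::TensorT &tensor) {
      return tensor.nodeType == NodeType_ValueNode && !tensor.data.empty();
    }
    }  // namespace mindspore::lite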