
!4825 modify converter format

Merge pull request !4825 from lyvette/parser
tags/v0.7.0-beta
Committed by mindspore-ci-bot, 5 years ago
commit 7fa37623b2
61 changed files with 110 additions and 210 deletions
1. +1 -2  mindspore/lite/tools/converter/parser/caffe/CMakeLists.txt
2. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc
3. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc
4. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc
5. +2 -1  mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc
6. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc
7. +3 -1  mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc
8. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc
9. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc
10. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc
11. +9 -9  mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc
12. +2 -2  mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc
13. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc
14. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc
15. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc
16. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc
17. +0 -67  mindspore/lite/tools/converter/parser/caffe/caffe_proposal_parser.cc
18. +0 -36  mindspore/lite/tools/converter/parser/caffe/caffe_proposal_parser.h
19. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc
20. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc
21. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc
22. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc
23. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc
24. +1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc
25. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc
26. +22 -22  mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc
27. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc
28. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc
29. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc
30. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc
31. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc
32. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc
33. +2 -2  mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc
34. +2 -2  mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc
35. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc
36. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc
37. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc
38. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc
39. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc
40. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc
41. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc
42. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc
43. +11 -10  mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc
44. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc
45. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc
46. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc
47. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc
48. +2 -2  mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc
49. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc
50. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc
51. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc
52. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc
53. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc
54. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc
55. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc
56. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc
57. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc
58. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.cc
59. +1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.cc
60. +2 -2  mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.cc
61. +5 -5  mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc

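Nearly all of the one-line changes below swap a raw new expression inside a std::unique_ptr constructor for std::make_unique. A minimal, self-contained sketch of the before/after pattern follows; FooT is a hypothetical stand-in for the generated attribute types (schema::ConcatT, schema::PoolingT, ...), not an actual converter type.

#include <memory>

// Hypothetical stand-in for a generated flatbuffer attribute type.
struct FooT { int axis = 0; };

int main() {
  // Before this PR: the attribute object was allocated with an explicit new.
  std::unique_ptr<FooT> attr_old(new FooT());

  // After this PR: std::make_unique constructs and wraps the object in one step.
  std::unique_ptr<FooT> attr_new = std::make_unique<FooT>();

  attr_new->axis = 1;
  // The parsers then hand ownership to the primitive via release(), exactly as
  // before; only the allocation expression changed.
  FooT *raw = attr_new.release();
  delete raw;  // sketch-only cleanup; in the real code the primitive takes ownership
  return 0;
}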
+1 -2  mindspore/lite/tools/converter/parser/caffe/CMakeLists.txt

@@ -27,5 +27,4 @@ add_library(caffe_parser_mid OBJECT
${CMAKE_CURRENT_SOURCE_DIR}/caffe_inspector.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_interp_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_permute_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_tile_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_proposal_parser.cc)
${CMAKE_CURRENT_SOURCE_DIR}/caffe_tile_parser.cc)

+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc

@@ -24,7 +24,7 @@ STATUS CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::ArgMaxT> attr(new schema::ArgMaxT());
std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>();
const caffe::ArgMaxParameter argmaxParam = proto.argmax_param();

int32_t axisType = 0;


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc

@@ -32,7 +32,7 @@ STATUS CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caf
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
// caffe batch norm attr
std::unique_ptr<schema::BatchNormT> attr(new schema::BatchNormT());
std::unique_ptr<schema::BatchNormT> attr = std::make_unique<schema::BatchNormT>();
const caffe::BatchNormParameter batchNormParam = proto.batch_norm_param();

// check bottom size


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc

@@ -26,7 +26,7 @@ STATUS CaffeConcatParser::Parse(const caffe::LayerParameter &proto,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::ConcatT> attr(new schema::ConcatT());
std::unique_ptr<schema::ConcatT> attr = std::make_unique<schema::ConcatT>();
const caffe::ConcatParameter concatParam = proto.concat_param();
if (concatParam.has_axis() && concatParam.has_concat_dim()) {
// MS_LOGE("Concat param in caffe have concat_dim and axis simultaneously,return fail");


+2 -1  mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc

@@ -24,7 +24,7 @@ void CaffeConvolutionParser::ParseGroupConvolution(schema::CNodeT *op, schema::C
if (attr == nullptr || attr->group == 1) {
return;
}
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam(new schema::DepthwiseConv2DT());
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam = std::make_unique<schema::DepthwiseConv2DT>();
if (depthwiseConv2DParam == nullptr) {
// MS_LOGW("new DepthwiseConv2DT failed");
return;
@@ -125,6 +125,7 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
if (status != RET_OK) {
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
}

return status;
}



+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc

@@ -25,7 +25,7 @@ STATUS CaffeCropParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::CropT> attr(new schema::CropT());
std::unique_ptr<schema::CropT> attr = std::make_unique<schema::CropT>();
if (!proto.has_crop_param()) {
attr->axis = CROP_AXIS;
std::vector<int64_t> offsets(2, 0);


+3 -1  mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc

@@ -24,7 +24,8 @@ void CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, schem
return;
}

std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam(new schema::DeDepthwiseConv2DT());
std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam
= std::make_unique<schema::DeDepthwiseConv2DT>();
if (deDepthwiseConv2DParam == nullptr) {
MS_LOG(ERROR) << "new DeDepthwiseConv2DT failed";
return;
@@ -125,6 +126,7 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const
if (status != RET_OK) {
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
}

return status;
}



+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc

@@ -26,7 +26,7 @@ namespace mindspore {
namespace lite {
STATUS CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::EltwiseT> attr(new schema::EltwiseT());
std::unique_ptr<schema::EltwiseT> attr = std::make_unique<schema::EltwiseT>();
if (proto.bottom_size() < ELTWISE_MIN_INPUT_SIZE) {
MS_LOG(ERROR) << "Eltwise Op " << proto.name() << " need at least 2 inputs,but input size is "
<< proto.bottom_size();


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc

@@ -22,7 +22,7 @@ namespace lite {
STATUS CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
const caffe::InnerProductParameter innerProductParam = proto.inner_product_param();
std::unique_ptr<schema::FullConnectionT> attr(new schema::FullConnectionT());
std::unique_ptr<schema::FullConnectionT> attr = std::make_unique<schema::FullConnectionT>();

if (!innerProductParam.has_num_output()) {
// MS_LOGE("InnerProduct Parse num_output for %s failed.", proto.name().c_str());


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc

@@ -21,7 +21,7 @@ namespace mindspore {
namespace lite {
STATUS CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ResizeT> attr(new schema::ResizeT());
std::unique_ptr<schema::ResizeT> attr = std::make_unique<schema::ResizeT>();
const caffe::InterpParameter interpParam = proto.interp_param();
if (interpParam.has_height()) {
int64_t height = interpParam.height();


+9 -9  mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc

@@ -33,7 +33,7 @@ const std::set<std::string> CaffeModelParser::skipedLayerType = {"Dropout"};

schema::MetaGraphT *CaffeModelParser::Parse(const std::string &modelFile, const std::string &weightFile,
const QuantType &quantType) {
std::unique_ptr<schema::MetaGraphT> graph(new schema::MetaGraphT());
// std::unique_ptr<schema::MetaGraphT> graph = std::make_unique<schema::MetaGraphT>();

if (ValidateFileStr(modelFile, ".prototxt") != RET_OK) {
MS_LOG(ERROR) << "INPUT ILLEGAL: modelFile must be *.prototxt";
@@ -50,7 +50,7 @@ schema::MetaGraphT *CaffeModelParser::Parse(const std::string &modelFile, const
return nullptr;
}

std::unique_ptr<schema::MetaGraphT> subGraphDef(new schema::MetaGraphT());
std::unique_ptr<schema::MetaGraphT> subGraphDef = std::make_unique<schema::MetaGraphT>();
TensorCache tensorCache;

caffe::NetParameter proto;
@@ -87,11 +87,11 @@ schema::MetaGraphT *CaffeModelParser::Parse(const std::string &modelFile, const
subGraphDef->name = GetModelName(modelFile);
// set all tensors to graph
SetAllTensors(tensorCache, subGraphDef.get());
graph = move(subGraphDef);
// graph = move(subGraphDef);

// ConvertCaffeBatchNorm(graph.get());

return graph.release();
return subGraphDef.release();
// return Fb2Anf(graph.release());
}

@@ -112,7 +112,7 @@ STATUS CaffeModelParser::SetOpInputIdx(const caffe::LayerParameter &layer, schem
STATUS CaffeModelParser::SetOpOutputIdx(const caffe::LayerParameter &layer, schema::CNodeT *op,
TensorCache *tensorCache) {
for (int i = 0; i < layer.top_size(); i++) {
std::unique_ptr<schema::TensorT> msTensor(new schema::TensorT());
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
op->outputIndex.emplace_back(tensorCache->AddTensor(layer.top(i), msTensor.release(), OP_OUTPUT));
}
return RET_OK;
@@ -176,7 +176,7 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
}
// todo y00520784 : layer.input_param().shape(0)
if (layer.type() == "Input") {
std::unique_ptr<schema::TensorT> msTensor(new schema::TensorT());
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
for (int j = 0; j < layer.input_param().shape(0).dim_size(); j++) {
msTensor->dims.push_back(layer.input_param().shape(0).dim(j));
}
@@ -190,7 +190,7 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
continue;
}

std::unique_ptr<schema::CNodeT> op(new schema::CNodeT());
std::unique_ptr<schema::CNodeT> op = std::make_unique<schema::CNodeT>();
op->name = layer.name();

// set op input index
@@ -234,7 +234,7 @@ STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorC
if (proto.input_dim_size() <= 0) {
continue;
}
std::unique_ptr<schema::TensorT> msTensor(new schema::TensorT());
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
for (int j = 0; j < proto.input_dim_size(); j++) {
msTensor->dims.push_back(proto.input_dim(j));
}
@@ -245,7 +245,7 @@ STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorC

for (int i = 0; i < proto.input_shape_size(); i++) {
auto shape = proto.input_shape(i);
std::unique_ptr<schema::TensorT> msTensor(new schema::TensorT());
std::unique_ptr<schema::TensorT> msTensor = std::make_unique<schema::TensorT>();
for (int j = 0; j < shape.dim_size(); j++) {
msTensor->dims.push_back(shape.dim(j));
}

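The Parse() change above keeps the existing ownership contract: the MetaGraphT is built inside a unique_ptr and handed to the caller as a raw pointer via release() (now "return subGraphDef.release();"). A small self-contained sketch of that handoff; the names are illustrative only, not the converter's API.

#include <memory>

struct MetaGraphT { /* ... */ };  // stand-in for schema::MetaGraphT

// Builds the graph in a unique_ptr and releases it to the caller,
// mirroring `return subGraphDef.release();` in the hunk above.
MetaGraphT *BuildGraph() {
  std::unique_ptr<MetaGraphT> graph = std::make_unique<MetaGraphT>();
  // ... populate the graph ...
  return graph.release();  // the caller now owns the raw pointer
}

int main() {
  std::unique_ptr<MetaGraphT> owned(BuildGraph());  // re-wrap to avoid a leak
  return 0;
}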

+2 -2  mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc

@@ -22,7 +22,7 @@
namespace mindspore {
namespace lite {
schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
std::unique_ptr<schema::TensorT> weight(new schema::TensorT());
std::unique_ptr<schema::TensorT> weight = std::make_unique<schema::TensorT>();
weight->format = schema::Format_NCHW;
std::vector<int32_t> shapeVec;
ConvertShape(proto, &shapeVec);
@@ -46,7 +46,7 @@ schema::TensorT *ConvertWeight(const caffe::BlobProto &proto) {
}

// get weight
std::unique_ptr<float[]> buf(new (std::nothrow) float[count]());
std::unique_ptr<float[]> buf = std::make_unique<float[]>(count);
if (buf == nullptr) {
return nullptr;
}

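One behavioral nuance in the weight-buffer change above: new (std::nothrow) float[count]() returns nullptr on allocation failure, while std::make_unique<float[]>(count) value-initializes the elements and throws std::bad_alloc instead of returning nullptr, so the buf == nullptr check that follows becomes purely defensive. A minimal sketch of the two behaviors, for illustration only:

#include <memory>
#include <new>

int main() {
  const int count = 16;

  // nothrow form: may yield nullptr, so the null check is meaningful.
  std::unique_ptr<float[]> a(new (std::nothrow) float[count]());
  if (a == nullptr) return 1;

  // make_unique form: throws std::bad_alloc on failure and value-initializes
  // every element to 0.0f, so a nullptr check can never fire.
  std::unique_ptr<float[]> b = std::make_unique<float[]>(count);
  return b[0] == 0.0f ? 0 : 1;
}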

+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc

@@ -24,7 +24,7 @@ STATUS CaffePermuteParser::Parse(const caffe::LayerParameter &proto,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
op->name = proto.name();
std::unique_ptr<schema::TransposeT> attr(new schema::TransposeT());
std::unique_ptr<schema::TransposeT> attr = std::make_unique<schema::TransposeT>();
const caffe::PermuteParameter permuteParam = proto.permute_param();

const int num_order_dims = permuteParam.order_size();


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc

@@ -27,7 +27,7 @@ STATUS CaffePoolingParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::PoolingT> attr(new schema::PoolingT());
std::unique_ptr<schema::PoolingT> attr = std::make_unique<schema::PoolingT>();
attr->format = schema::Format_NCHW;

const caffe::PoolingParameter poolingParam = proto.pooling_param();


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc

@@ -27,7 +27,7 @@ STATUS CaffePowerParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::PowerT> attr(new schema::PowerT());
std::unique_ptr<schema::PowerT> attr = std::make_unique<schema::PowerT>();
const caffe::PowerParameter powerParam = proto.power_param();
if (proto.has_power_param()) {
attr->power = powerParam.has_power() ? powerParam.power() : CAFFE_POWER_DEFAULT_POWER;


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc

@@ -23,7 +23,7 @@ STATUS CaffePReluParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::CaffePReLUT> attr(new schema::CaffePReLUT());
std::unique_ptr<schema::CaffePReLUT> attr = std::make_unique<schema::CaffePReLUT>();
const caffe::PReLUParameter pReluParam = proto.prelu_param();
if (pReluParam.has_channel_shared()) {
attr->channelShared = pReluParam.channel_shared();


+0 -67  mindspore/lite/tools/converter/parser/caffe/caffe_proposal_parser.cc

@@ -1,67 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_proposal_parser.h"

namespace mindspore {
namespace lite {
STATUS CaffeProposalParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ProposalT> attr(new schema::ProposalT());
const caffe::ProposalParameter proposal_param = proto.proposal_param();

if (proposal_param.has_feat_stride()) {
attr->feat_stride = proposal_param.feat_stride();
}
if (proposal_param.has_base_size()) {
attr->base_size = proposal_param.base_size();
}
if (proposal_param.has_min_size()) {
attr->min_size = proposal_param.min_size();
}
if (proposal_param.has_pre_nms_topn()) {
attr->pre_nms_topn = proposal_param.pre_nms_topn();
}
if (proposal_param.has_post_nms_topn()) {
attr->post_nms_topn = proposal_param.post_nms_topn();
}
if (proposal_param.has_nms_thresh()) {
attr->nms_thresh = proposal_param.nms_thresh();
}
const int num_ratio = proposal_param.ratio_size();
attr->ratio.resize(num_ratio);
for (int i = 0; i < num_ratio; ++i) {
attr->ratio[i] = proposal_param.ratio(i);
}
const int num_scale = proposal_param.scale_size();
attr->scale.resize(num_scale);
for (int i = 0; i < num_scale; ++i) {
attr->scale[i] = proposal_param.scale(i);
}

op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();
op->primitive->value.type = schema::PrimitiveType_Tile;
return RET_OK;
}

CaffeNodeRegistrar g_caffeProposalParser("Proposal", new CaffeProposalParser());
} // namespace lite
} // namespace mindspore

+0 -36  mindspore/lite/tools/converter/parser/caffe/caffe_proposal_parser.h

@@ -1,36 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef LITE_CAFFE_PROPOSAL_PARSER_H
#define LITE_CAFFE_PROPOSAL_PARSER_H

#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.h"

namespace mindspore {
namespace lite {
class CaffeProposalParser : public CaffeNodeParser {
public:
CaffeProposalParser() : CaffeNodeParser("proposal") {}

STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore

#endif // LITE_CAFFE_PROPOSAL_PARSER_H

+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc

@@ -23,7 +23,7 @@ STATUS CaffeReluParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT());
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
attr->type = schema::ActivationType_RELU;
// relu: negative_slope = 0, no parameter;
// leakyrelu: negative_slope != 0;


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc

@@ -23,7 +23,7 @@ STATUS CaffeReshapeParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ReshapeT> attr(new schema::ReshapeT());
std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>();
attr->format = schema::Format_NCHW;

const caffe::ReshapeParameter reshapeParam = proto.reshape_param();


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc

@@ -24,7 +24,7 @@ namespace mindspore {
namespace lite {
STATUS CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight,
schema::CNodeT *op, std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ScaleT> attr(new schema::ScaleT());
std::unique_ptr<schema::ScaleT> attr = std::make_unique<schema::ScaleT>();

if (weight.blobs_size() + weight.bottom_size() < 2) {
// MS_LOGE("Scale bottom size:%d, blobs size:%d invalid in layer %s", weight.bottom_size(), weight.blobs_size(),


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc

@@ -23,7 +23,7 @@ STATUS CaffeSigmoidParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT());
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
attr->type = schema::ActivationType_SIGMOID;
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc

@@ -26,7 +26,7 @@ STATUS CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::SoftMaxT> attr(new schema::SoftMaxT());
std::unique_ptr<schema::SoftMaxT> attr = std::make_unique<schema::SoftMaxT>();
if (proto.has_softmax_param() && proto.softmax_param().has_axis()) {
if (proto.softmax_param().axis() == -1) {
MS_LOG(ERROR) << "axis with -1 may lead to calculation errors when input less than 4 dims.";


+1 -1  mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc

@@ -24,7 +24,7 @@ STATUS CaffeTileParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::TileT> attr(new schema::TileT());
std::unique_ptr<schema::TileT> attr = std::make_unique<schema::TileT>();
const caffe::TileParameter tile_param = proto.tile_param();

std::vector<int> dims;


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc

@@ -23,7 +23,7 @@ STATUS OnnxArgMaxParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ArgMaxParser";
std::unique_ptr<schema::ArgMaxT> attr(new schema::ArgMaxT());
std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "axis") {


+22 -22  mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc

@@ -22,7 +22,7 @@ namespace lite {
STATUS OnnxAddParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx AddParser";
if (op != nullptr) {
std::unique_ptr<schema::AddT> attr(new schema::AddT());
std::unique_ptr<schema::AddT> attr = std::make_unique<schema::AddT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Add;
op->primitive->value.value = attr.release();
@@ -33,7 +33,7 @@ STATUS OnnxAddParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxSubParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx SubParser";
if (op != nullptr) {
std::unique_ptr<schema::SubT> attr(new schema::SubT());
std::unique_ptr<schema::SubT> attr = std::make_unique<schema::SubT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Sub;
op->primitive->value.value = attr.release();
@@ -44,7 +44,7 @@ STATUS OnnxSubParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxMulParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx MulParser";
if (op != nullptr) {
std::unique_ptr<schema::MulT> attr(new schema::MulT());
std::unique_ptr<schema::MulT> attr = std::make_unique<schema::MulT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Mul;
op->primitive->value.value = attr.release();
@@ -55,7 +55,7 @@ STATUS OnnxMulParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxDivParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx DivParser";
if (op != nullptr) {
std::unique_ptr<schema::DivT> attr(new schema::DivT());
std::unique_ptr<schema::DivT> attr = std::make_unique<schema::DivT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Div;
op->primitive->value.value = attr.release();
@@ -67,7 +67,7 @@ STATUS OnnxPowParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
MS_LOG(DEBUG) << "onnx PowParser";
if (op != nullptr) {
// TODO(wangzhe) attr power need populate
std::unique_ptr<schema::PowerT> attr(new schema::PowerT());
std::unique_ptr<schema::PowerT> attr = std::make_unique<schema::PowerT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Power;
op->primitive->value.value = attr.release();
@@ -78,7 +78,7 @@ STATUS OnnxEqualParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx EqualParser";
if (op != nullptr) {
std::unique_ptr<schema::EqualT> attr(new schema::EqualT());
std::unique_ptr<schema::EqualT> attr = std::make_unique<schema::EqualT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Equal;
op->primitive->value.value = attr.release();
@@ -89,7 +89,7 @@ STATUS OnnxEqualParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No
STATUS OnnxLessParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx LessParser";
if (op != nullptr) {
std::unique_ptr<schema::LessT> attr(new schema::LessT());
std::unique_ptr<schema::LessT> attr = std::make_unique<schema::LessT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Less;
op->primitive->value.value = attr.release();
@@ -100,7 +100,7 @@ STATUS OnnxGreaterParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx GreaterParser";
if (op != nullptr) {
std::unique_ptr<schema::GreaterT> attr(new schema::GreaterT());
std::unique_ptr<schema::GreaterT> attr = std::make_unique<schema::GreaterT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Greater;
op->primitive->value.value = attr.release();
@@ -111,7 +111,7 @@ STATUS OnnxGreaterParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::
STATUS OnnxMinParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx MinParser";
if (op != nullptr) {
std::unique_ptr<schema::MinT> attr(new schema::MinT());
std::unique_ptr<schema::MinT> attr = std::make_unique<schema::MinT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Min;
op->primitive->value.value = attr.release();
@@ -122,7 +122,7 @@ STATUS OnnxMinParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxEltwiseParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx EltwiseParser";
std::unique_ptr<schema::EltwiseT> attr(new schema::EltwiseT());
std::unique_ptr<schema::EltwiseT> attr = std::make_unique<schema::EltwiseT>();
// there is no Prod in onnx
if (onnx_node.op_type() == "Sum") {
attr->mode = schema::EltwiseMode_SUM;
@@ -142,7 +142,7 @@ STATUS OnnxFloorParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx FloorParser";
if (op != nullptr) {
std::unique_ptr<schema::FloorT> attr(new schema::FloorT());
std::unique_ptr<schema::FloorT> attr = std::make_unique<schema::FloorT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Floor;
op->primitive->value.value = attr.release();
@@ -152,7 +152,7 @@ STATUS OnnxFloorParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No
STATUS OnnxAbsParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx AbsParser";
if (op != nullptr) {
std::unique_ptr<schema::AbsT> attr(new schema::AbsT());
std::unique_ptr<schema::AbsT> attr = std::make_unique<schema::AbsT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Abs;
op->primitive->value.value = attr.release();
@@ -162,7 +162,7 @@ STATUS OnnxAbsParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxNegParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx NegParser";
if (op != nullptr) {
std::unique_ptr<schema::NegT> attr(new schema::NegT());
std::unique_ptr<schema::NegT> attr = std::make_unique<schema::NegT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Neg;
op->primitive->value.value = attr.release();
@@ -172,7 +172,7 @@ STATUS OnnxNegParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxExpParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ExpParser";
if (op != nullptr) {
std::unique_ptr<schema::ExpT> attr(new schema::ExpT());
std::unique_ptr<schema::ExpT> attr = std::make_unique<schema::ExpT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Exp;
op->primitive->value.value = attr.release();
@@ -182,7 +182,7 @@ STATUS OnnxExpParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxCosParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx CosParser";
if (op != nullptr) {
std::unique_ptr<schema::CosT> attr(new schema::CosT());
std::unique_ptr<schema::CosT> attr = std::make_unique<schema::CosT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Cos;
op->primitive->value.value = attr.release();
@@ -192,7 +192,7 @@ STATUS OnnxCosParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxSinParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx SinParser";
if (op != nullptr) {
std::unique_ptr<schema::SinT> attr(new schema::SinT());
std::unique_ptr<schema::SinT> attr = std::make_unique<schema::SinT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Sin;
op->primitive->value.value = attr.release();
@@ -202,7 +202,7 @@ STATUS OnnxSinParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxSqrtParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx SqrtParser";
if (op != nullptr) {
std::unique_ptr<schema::SqrtT> attr(new schema::SqrtT());
std::unique_ptr<schema::SqrtT> attr = std::make_unique<schema::SqrtT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Sqrt;
op->primitive->value.value = attr.release();
@@ -212,7 +212,7 @@ STATUS OnnxSqrtParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
STATUS OnnxCeilParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx CeilParser";
if (op != nullptr) {
std::unique_ptr<schema::CeilT> attr(new schema::CeilT());
std::unique_ptr<schema::CeilT> attr = std::make_unique<schema::CeilT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Ceil;
op->primitive->value.value = attr.release();
@@ -222,7 +222,7 @@ STATUS OnnxCeilParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
STATUS OnnxLogParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx LogParser";
if (op != nullptr) {
std::unique_ptr<schema::LogT> attr(new schema::LogT());
std::unique_ptr<schema::LogT> attr = std::make_unique<schema::LogT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Log;
op->primitive->value.value = attr.release();
@@ -232,7 +232,7 @@ STATUS OnnxLogParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxTanParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx TanParser";
if (op != nullptr) {
std::unique_ptr<schema::TanT> attr(new schema::TanT());
std::unique_ptr<schema::TanT> attr = std::make_unique<schema::TanT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Tan;
op->primitive->value.value = attr.release();
@@ -242,7 +242,7 @@ STATUS OnnxTanParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Node
STATUS OnnxAtanParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx AtanParser";
if (op != nullptr) {
std::unique_ptr<schema::AtanT> attr(new schema::AtanT());
std::unique_ptr<schema::AtanT> attr = std::make_unique<schema::AtanT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Atan;
op->primitive->value.value = attr.release();
@@ -252,7 +252,7 @@ STATUS OnnxAtanParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
STATUS OnnxAsinParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx AsinParser";
if (op != nullptr) {
std::unique_ptr<schema::AsinT> attr(new schema::AsinT());
std::unique_ptr<schema::AsinT> attr = std::make_unique<schema::AsinT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Asin;
op->primitive->value.value = attr.release();

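The 22 changes in this file are identical in shape: each op parser builds its attribute with make_unique, creates the primitive, and transfers ownership of the attribute with release(). To show that shared shape in isolation, here is a hypothetical templated helper; it is not part of this PR or the converter, and the struct definitions are simplified stand-ins for the generated schema types.

#include <memory>

// Simplified stand-ins; the real code uses schema::PrimitiveT, schema::CNodeT,
// the PrimitiveType enum, and generated attribute types such as schema::AddT.
struct PrimitiveT { int type = 0; void *value = nullptr; };
struct CNodeT { std::unique_ptr<PrimitiveT> primitive; };

// Hypothetical helper capturing the repeated pattern of these parsers.
template <typename AttrT>
void SetPrimitive(CNodeT *op, int primitive_type) {
  if (op == nullptr) return;
  std::unique_ptr<AttrT> attr = std::make_unique<AttrT>();
  op->primitive = std::make_unique<PrimitiveT>();
  op->primitive->type = primitive_type;
  op->primitive->value = attr.release();
}

struct AddT {};

int main() {
  CNodeT op;
  SetPrimitive<AddT>(&op, /*PrimitiveType_Add, illustrative value=*/5);
  delete static_cast<AddT *>(op.primitive->value);  // sketch-only cleanup
  return 0;
}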

+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc

@@ -22,7 +22,7 @@ namespace lite {
STATUS OnnxBatchNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx BatchNormParser";
std::unique_ptr<schema::FusedBatchNormT> attr(new schema::FusedBatchNormT());
std::unique_ptr<schema::FusedBatchNormT> attr = std::make_unique<schema::FusedBatchNormT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
if (onnx_node_attr.name() == "epsilon") {
attr->epsilon = onnx_node_attr.f();


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc

@@ -26,7 +26,7 @@ STATUS OnnxBiasAddParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx BiasAddParser";
std::unique_ptr<schema::BiasAddT> attr(new schema::BiasAddT());
std::unique_ptr<schema::BiasAddT> attr = std::make_unique<schema::BiasAddT>();
// use channel dim as axis
attr->axis = {1};
if (op != nullptr) {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc

@@ -21,7 +21,7 @@ namespace mindspore {
namespace lite {
STATUS OnnxCastParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx CastParser";
std::unique_ptr<schema::CastT> attr(new schema::CastT());
std::unique_ptr<schema::CastT> attr = std::make_unique<schema::CastT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "to") {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc

@@ -31,7 +31,7 @@ STATUS OnnxClipParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
}
}
if (min == 0 && max == 6) {
std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT());
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
attr->type = schema::ActivationType_RELU6;
if (op != nullptr) {
op->primitive = std::make_unique<schema::PrimitiveT>();


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc

@@ -23,7 +23,7 @@ STATUS OnnxConcatParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ConcatParser";
std::unique_ptr<schema::ConcatT> attr(new schema::ConcatT());
std::unique_ptr<schema::ConcatT> attr = std::make_unique<schema::ConcatT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "axis") {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc

@@ -24,7 +24,7 @@ STATUS OnnxConstantParser::Parse(const onnx::GraphProto &onnx_graph,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ConstantParser";
if (op != nullptr) {
std::unique_ptr<schema::ConstantT> attr(new schema::ConstantT());
std::unique_ptr<schema::ConstantT> attr = std::make_unique<schema::ConstantT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Constant;
op->primitive->value.value = attr.release();


+2 -2  mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc

@@ -26,7 +26,7 @@ bool OnnxConvParser::ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT
if (attr == nullptr || attr->group != attr->channelIn) {
return false;
}
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam(new (std::nothrow) schema::DepthwiseConv2DT());
std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam = std::make_unique<schema::DepthwiseConv2DT>();
if (depthwiseConv2DParam == nullptr) {
MS_LOG(ERROR) << "new DepthwiseConv2DT failed";
return false;
@@ -55,7 +55,7 @@ bool OnnxConvParser::ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT

STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ConvParser";
std::unique_ptr<schema::Conv2DT> attr(new (std::nothrow) schema::Conv2DT());
std::unique_ptr<schema::Conv2DT> attr = std::make_unique<schema::Conv2DT>();
// set opdef each attr params
for (const auto &onnx_node_attr : onnx_node.attribute()) {
if (onnx_node_attr.name() == "group") {


+2 -2  mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc

@@ -26,7 +26,7 @@ bool OnnxDeConvParser::ParseGroupDeConvolution(const std::unique_ptr<schema::DeC
if (attr == nullptr || attr->group != attr->channelOut) {
return false;
}
std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam(new (std::nothrow) schema::DeDepthwiseConv2DT());
std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam = std::make_unique<schema::DeDepthwiseConv2DT>();
if (deDepthwiseConv2DParam == nullptr) {
MS_LOG(ERROR) << "new DeDepthwiseConv2DT failed";
return false;
@@ -58,7 +58,7 @@ bool OnnxDeConvParser::ParseGroupDeConvolution(const std::unique_ptr<schema::DeC

STATUS OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
std::unique_ptr<schema::DeConv2DT> attr(new (std::nothrow) schema::DeConv2DT());
std::unique_ptr<schema::DeConv2DT> attr = std::make_unique<schema::DeConv2DT>();
// set opdef each attr params
for (const auto &onnx_node_attr : onnx_node.attribute()) {
if (onnx_node_attr.name() == "group") {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc

@@ -23,7 +23,7 @@ STATUS OnnxDepthToSpaceParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx DepthToSpaceParser";
std::unique_ptr<schema::DepthToSpaceT> attr(new schema::DepthToSpaceT());
std::unique_ptr<schema::DepthToSpaceT> attr = std::make_unique<schema::DepthToSpaceT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto& attribute_name = onnx_node_attr.name();
if (attribute_name == "blocksize") {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc

@@ -23,7 +23,7 @@ STATUS OnnxDropoutParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx DropoutParser";
std::unique_ptr<schema::DropoutT> attr(new schema::DropoutT());
std::unique_ptr<schema::DropoutT> attr = std::make_unique<schema::DropoutT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "ratio") {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc

@@ -21,7 +21,7 @@ namespace mindspore {
namespace lite {
STATUS OnnxEluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx EluParser";
std::unique_ptr<schema::EluT> attr(new schema::EluT());
std::unique_ptr<schema::EluT> attr = std::make_unique<schema::EluT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto& attribute_name = onnx_node_attr.name();
if (attribute_name == "alpha") {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc

@@ -23,7 +23,7 @@ STATUS OnnxExpandParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::N
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ExpandParser";
if (op != nullptr) {
std::unique_ptr<schema::BroadcastT> attr(new schema::BroadcastT());
std::unique_ptr<schema::BroadcastT> attr = std::make_unique<schema::BroadcastT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Broadcast;
op->primitive->value.value = attr.release();


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc

@@ -23,7 +23,7 @@ STATUS OnnxFlattenParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx FlattenParser";
std::unique_ptr<schema::ReshapeT> attr(new schema::ReshapeT());
std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>();
int axis = 1;
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc

@@ -23,7 +23,7 @@ STATUS OnnxGatherParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx GatherParser";
std::unique_ptr<schema::GatherT> attr(new schema::GatherT());
std::unique_ptr<schema::GatherT> attr = std::make_unique<schema::GatherT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto& attribute_name = onnx_node_attr.name();
if (attribute_name == "axis") {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc

@@ -21,7 +21,7 @@ namespace mindspore {
namespace lite {
STATUS OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx LrnParser";
std::unique_ptr<schema::LrnT> attr(new schema::LrnT());
std::unique_ptr<schema::LrnT> attr = std::make_unique<schema::LrnT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto& attribute_name = onnx_node_attr.name();
if (attribute_name == "size") {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc

@@ -22,7 +22,7 @@ namespace lite {
STATUS OnnxMatmulParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx MatMulParser";
std::unique_ptr<schema::MatMulT> attr(new schema::MatMulT());
std::unique_ptr<schema::MatMulT> attr = std::make_unique<schema::MatMulT>();
float alpha = 1.0f;
float beta = 1.0f;
for (const auto &onnx_node_attr : onnx_node.attribute()) {


+11 -10  mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc

@@ -77,6 +77,7 @@ STATUS OnnxModelParser::ReadOnnxModelFromBinary(const std::string &modelFile, go
return RET_ERROR;
}
(void)close(fd);
onnx_file.release();
return RET_OK;
}

@@ -124,7 +125,7 @@ STATUS OnnxModelParser::AddValueInfo(const onnx::ValueInfoProto &proto, const st
<< static_cast<onnx::TensorProto_DataType>(proto.type().tensor_type().elem_type());
return RET_ERROR;
}
std::unique_ptr<schema::TensorT> tensor(new schema::TensorT);
std::unique_ptr<schema::TensorT> tensor = std::make_unique<schema::TensorT>();
if (tensor == nullptr) {
MS_LOG(ERROR) << "new tensor failed";
return RET_ERROR;
@@ -144,7 +145,7 @@ STATUS OnnxModelParser::AddTensorProto(const onnx::TensorProto &proto, const std
return RET_ERROR;
}

std::unique_ptr<schema::TensorT> tensor(new (std::nothrow) schema::TensorT);
std::unique_ptr<schema::TensorT> tensor = std::make_unique<schema::TensorT>();
if (tensor == nullptr) {
MS_LOG(ERROR) << "new tensor failed";
return RET_ERROR;
@@ -197,7 +198,7 @@ STATUS OnnxModelParser::SetGraphOutputTensor(const onnx::GraphProto &onnx_graph,

void OnnxModelParser::ParseOnnxGemmNode(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::MetaGraphT *graph, TensorCache *tensor_cache) {
std::unique_ptr<schema::CNodeT> dst_op_1(new schema::CNodeT);
std::unique_ptr<schema::CNodeT> dst_op_1 = std::make_unique<schema::CNodeT>();
dst_op_1->name = "Gemm_MatMul_" + onnx_node.output(0);
ParseOnnxNodeAttr(onnx_graph, onnx_node, "MatMul", dst_op_1.get());
auto matmul_output_id = "Gemm_MatMul_" + onnx_node.output(0);
@@ -207,7 +208,7 @@ void OnnxModelParser::ParseOnnxGemmNode(const onnx::GraphProto &onnx_graph, cons
SetOpOutputIndex(matmul_outputs, dst_op_1.get(), tensor_cache);
graph->nodes.emplace_back(std::move(dst_op_1));

std::unique_ptr<schema::CNodeT> dst_op_2(new schema::CNodeT);
std::unique_ptr<schema::CNodeT> dst_op_2 = std::make_unique<schema::CNodeT>();
dst_op_2->name = "Gemm_BiasAdd_" + onnx_node.output(0);
ParseOnnxNodeAttr(onnx_graph, onnx_node, "BiasAdd", dst_op_2.get());
std::vector<string> biasadd_inputs{matmul_output_id, onnx_node.input(2)};
@@ -221,7 +222,7 @@ STATUS OnnxModelParser::ParseOnnxGivenFillNode(const onnx::NodeProto &onnx_node,
// convert GivenTensorFill node to a weight/bias tensor
auto ret = tensor_cache->FindTensor(onnx_node.output(0));
if (ret < 0) {
std::unique_ptr<schema::TensorT> tensor(new schema::TensorT);
std::unique_ptr<schema::TensorT> tensor = std::make_unique<schema::TensorT>();
std::vector<int> shape;
auto iter = std::find_if(onnx_node.attribute().begin(), onnx_node.attribute().end(),
[](const onnx::AttributeProto &attr) { return attr.name() == "shape"; });
@@ -329,7 +330,7 @@ void OnnxModelParser::SetOpQuantParams(const onnx::GraphProto &onnx_graph, const
}
size_t findQuantParams = 0;
for (const auto &node : quant_node) {
std::unique_ptr<schema::QuantParamT> quant_param(new (std::nothrow) schema::QuantParamT());
std::unique_ptr<schema::QuantParamT> quant_param = std::make_unique<schema::QuantParamT>();
if (quant_param == nullptr) {
MS_LOG(ERROR) << "new QuantParamT failed, node: " << dst_op->name;
return;
@@ -392,7 +393,7 @@ STATUS OnnxModelParser::SetOpOutputIndex(const std::vector<string> &node_outputs
for (const auto &onnx_node_output : node_outputs) {
auto index = tensor_cache->FindTensor(onnx_node_output);
if (index < 0) { // when index >= 0, it's graph's output
std::unique_ptr<schema::TensorT> tensor(new schema::TensorT);
std::unique_ptr<schema::TensorT> tensor = std::make_unique<schema::TensorT>();
tensor->nodeType = schema::NodeType_Parameter;
index = tensor_cache->AddTensor(onnx_node_output, tensor.release(), OP_OUTPUT);
}
@@ -489,7 +490,7 @@ MetaGraphT *OnnxModelParser::Parse(const std::string &modelFile, const std::stri
MS_LOG(ERROR) << "Input illegal: modelFile must be *.onnx";
return nullptr;
}
std::unique_ptr<schema::MetaGraphT> dst_graph(new schema::MetaGraphT());
std::unique_ptr<schema::MetaGraphT> dst_graph = std::make_unique<schema::MetaGraphT>();
onnx::ModelProto onnx_model;
if (ReadOnnxModelFromBinary(modelFile, &onnx_model) != RET_OK) {
MS_LOG(ERROR) << "read onnx model fail";
@@ -533,8 +534,8 @@ MetaGraphT *OnnxModelParser::Parse(const std::string &modelFile, const std::stri
continue;
}

std::unique_ptr<schema::CNodeT> dst_op(new schema::CNodeT);
std::unique_ptr<schema::TensorT> dst_tensor(new schema::TensorT);
std::unique_ptr<schema::CNodeT> dst_op = std::make_unique<schema::CNodeT>();
std::unique_ptr<schema::TensorT> dst_tensor = std::make_unique<schema::TensorT>();
auto status = ParseOnnxNodeToDstOp(onnx_graph, onnx_node, dst_op.get(), dst_tensor.get(), &tensor_cache);
if (status != RET_OK) {
MS_LOG(ERROR) << "parse node " << onnx_node.op_type() << " failed";


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc

@@ -26,7 +26,7 @@ schema::PadMode OnnxNodeParser::GetOnnxPadMode(const onnx::AttributeProto &onnx_
} else if (onnx_node_attr.s() == "VALID") {
return schema::PadMode_VALID;
} else {
// MS_LOGE("unsupported padMode");
MS_LOG(ERROR) << "unsupported padMode";
return schema::PadMode_NOTSET;
}
}


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc

@@ -21,7 +21,7 @@ namespace mindspore {
namespace lite {
STATUS OnnxPadParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx PadParser";
std::unique_ptr<schema::PadT> attr(new schema::PadT());
std::unique_ptr<schema::PadT> attr = std::make_unique<schema::PadT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "pads") {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc

@@ -21,7 +21,7 @@ namespace mindspore {
namespace lite {
STATUS OnnxPoolParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx PoolParser";
std::unique_ptr<schema::PoolingT> attr(new schema::PoolingT());
std::unique_ptr<schema::PoolingT> attr = std::make_unique<schema::PoolingT>();

attr->format = schema::Format_NCHW;
const auto &pool_type = onnx_node.op_type();


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc

@@ -23,7 +23,7 @@ STATUS OnnxReduceParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ReduceParser";
std::unique_ptr<schema::ReduceT> attr(new schema::ReduceT());
std::unique_ptr<schema::ReduceT> attr = std::make_unique<schema::ReduceT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "axes") {


+2 -2  mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc

@@ -22,7 +22,7 @@ namespace mindspore {
namespace lite {
STATUS OnnxReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ReluParser";
std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT());
std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
const auto &relu_type = onnx_node.op_type();
if (relu_type == "Relu") {
attr->type = schema::ActivationType_RELU;
@@ -45,7 +45,7 @@ STATUS OnnxPReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::No
MS_LOG(ERROR) << "input num is not 2";
return RET_PARAM_INVALID;
}
std::unique_ptr<schema::CaffePReLUT> attr(new schema::CaffePReLUT());
std::unique_ptr<schema::CaffePReLUT> attr = std::make_unique<schema::CaffePReLUT>();
std::vector<onnx::TensorProto> params;
const auto &input_name = onnx_node.input(1);
for (const auto &it : onnx_graph.initializer()) {


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc

@@ -23,7 +23,7 @@ namespace lite {
STATUS OnnxReshapeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ReshapeParser";
std::unique_ptr<schema::ReshapeT> attr(new schema::ReshapeT());
std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>();
attr->format = schema::Format_NCHW;
std::vector<onnx::TensorProto> params;
// TODO(wangzhe) shape may also come from other op, there need refactor to introduce tensor_cache


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc

@@ -24,7 +24,7 @@ STATUS OnnxShapeParser::Parse(const onnx::GraphProto &onnx_graph,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx ShapeParser";
if (op != nullptr) {
std::unique_ptr<schema::ShapeT> attr(new schema::ShapeT());
std::unique_ptr<schema::ShapeT> attr = std::make_unique<schema::ShapeT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Shape;
op->primitive->value.value = attr.release();


+1 -1  mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc

@@ -23,7 +23,7 @@ STATUS OnnxSigmoidParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx SigmoidParser";
-std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT());
+std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>();
attr->type = schema::ActivationType_SIGMOID;
if (op != nullptr) {
op->primitive = std::make_unique<schema::PrimitiveT>();


+ 1
- 1
mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc View File

@@ -22,7 +22,7 @@ namespace lite {
STATUS OnnxSliceParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx SliceParser";
-std::unique_ptr<schema::SliceT> attr(new schema::SliceT());
+std::unique_ptr<schema::SliceT> attr = std::make_unique<schema::SliceT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "starts") {


+ 1
- 1
mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc View File

@@ -23,7 +23,7 @@ STATUS OnnxSoftMaxParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx SoftMaxParser";
-std::unique_ptr<schema::SoftMaxT> attr(new schema::SoftMaxT());
+std::unique_ptr<schema::SoftMaxT> attr = std::make_unique<schema::SoftMaxT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto& attribute_name = onnx_node_attr.name();
if (attribute_name == "axis") {


+ 1
- 1
mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc View File

@@ -22,7 +22,7 @@ namespace lite {
STATUS OnnxSpaceToDepthParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx SpaceToDepthParser";
-std::unique_ptr<schema::SpaceToDepthT> attr(new schema::SpaceToDepthT());
+std::unique_ptr<schema::SpaceToDepthT> attr = std::make_unique<schema::SpaceToDepthT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "blocksize") {


+ 1
- 1
mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc View File

@@ -23,7 +23,7 @@ STATUS OnnxSqueezeParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx SqueezeParser";
-std::unique_ptr<schema::SqueezeT> attr(new schema::SqueezeT());
+std::unique_ptr<schema::SqueezeT> attr = std::make_unique<schema::SqueezeT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "axes") {


+ 1
- 1
mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc View File

@@ -22,7 +22,7 @@ namespace lite {
STATUS OnnxTileParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx TileParser";
if (op != nullptr) {
-std::unique_ptr<schema::TileT> attr(new schema::TileT());
+std::unique_ptr<schema::TileT> attr = std::make_unique<schema::TileT>();
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Tile;
op->primitive->value.value = attr.release();


+ 1
- 1
mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc View File

@@ -23,7 +23,7 @@ STATUS OnnxTransposeParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx TransposeParser";
-std::unique_ptr<schema::TransposeT> attr(new schema::TransposeT());
+std::unique_ptr<schema::TransposeT> attr = std::make_unique<schema::TransposeT>();
attr->conjugate = false;
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();


+ 1
- 1
mindspore/lite/tools/converter/parser/onnx/onnx_unsample_parser.cc View File

@@ -23,7 +23,7 @@ STATUS OnnxUpsampleParser::Parse(const onnx::GraphProto &onnx_graph,
const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx UpsampleParser";
-std::unique_ptr<schema::UpsampleT> attr(new schema::UpsampleT());
+std::unique_ptr<schema::UpsampleT> attr = std::make_unique<schema::UpsampleT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "mode") {


+ 1
- 1
mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.cc View File

@@ -22,7 +22,7 @@ namespace lite {
STATUS OnnxUnSqueezeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node,
schema::CNodeT *op) {
MS_LOG(DEBUG) << "onnx UnSqueezeParser";
-std::unique_ptr<schema::UnsqueezeT> attr(new schema::UnsqueezeT());
+std::unique_ptr<schema::UnsqueezeT> attr = std::make_unique<schema::UnsqueezeT>();
for (const auto &onnx_node_attr : onnx_node.attribute()) {
const auto &attribute_name = onnx_node_attr.name();
if (attribute_name == "axes") {


+ 2
- 2
mindspore/lite/tools/converter/parser/onnx/onnx_unuseful_node_parser.cc View File

@@ -27,10 +27,10 @@ STATUS OnnxUnusefulNodeParser::Parse(const onnx::GraphProto &onnx_graph,
op->primitive = std::make_unique<schema::PrimitiveT>();
if (onnx_node.op_type() == "Int8Quantize") {
op->primitive->value.type = schema::PrimitiveType_OnnxInt8Quantize;
-op->primitive->value.value = new (std::nothrow) schema::OnnxInt8QuantizeT;
+op->primitive->value.value = std::make_unique<schema::OnnxInt8QuantizeT>().release();
} else if (onnx_node.op_type() == "Int8Dequantize") {
op->primitive->value.type = schema::PrimitiveType_OnnxInt8Dequantize;
-op->primitive->value.value = new (std::nothrow) schema::OnnxInt8DequantizeT;
+op->primitive->value.value = std::make_unique<schema::OnnxInt8DequantizeT>().release();
} else {
// MS_LOGE("Unsupported nodeType: %s", onnx_node.op_type().c_str());
return RET_ERROR;
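In this hunk new (std::nothrow) is replaced by std::make_unique<T>().release(), which, reading the diff rather than anything stated in it, also changes the failure mode: allocation failure now throws std::bad_alloc instead of returning a nullptr that would be stored unchecked in value. A short sketch contrasting the two forms, with a stand-in type:

// nothrow_vs_make_unique_sketch.cc -- illustrative only, stand-in type
#include <memory>
#include <new>

struct OnnxInt8QuantizeT {};  // stand-in for the generated schema type

int main() {
  // Old form: returns nullptr on allocation failure, which the caller would
  // have to check before use.
  OnnxInt8QuantizeT *old_value = new (std::nothrow) OnnxInt8QuantizeT;

  // New form: throws std::bad_alloc on failure; release() hands the raw
  // pointer over to whatever stores it (the primitive's value field above).
  OnnxInt8QuantizeT *new_value = std::make_unique<OnnxInt8QuantizeT>().release();

  delete old_value;
  delete new_value;
  return 0;
}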


+ 5
- 5
mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc View File

@@ -64,7 +64,7 @@ STATUS TfliteModelParser::CopyConstTensorData(const std::vector<std::unique_ptr<

void TfliteModelParser::SetTensorQuantParam(const std::unique_ptr<tflite::TensorT> &tflite_tensor,
schema::TensorT *tensor) {
-std::unique_ptr<schema::QuantParamT> quant_param(new QuantParamT());
+std::unique_ptr<schema::QuantParamT> quant_param = std::make_unique<QuantParamT>();
if (!tflite_tensor->quantization->scale.empty()) {
quant_param->scale = tflite_tensor->quantization->scale[0];
}
@@ -104,7 +104,7 @@ STATUS TfliteModelParser::ConvertOp(const std::unique_ptr<tflite::ModelT> &tflit
return RET_ERROR;
}

-std::unique_ptr<schema::CNodeT> op(new schema::CNodeT);
+std::unique_ptr<schema::CNodeT> op = std::make_unique<schema::CNodeT>();
op->name = op_type + "-" + std::to_string(idx++);
op->quantType = quant_type;
MS_LOG(INFO) << "parse op: " << op->name.c_str();
@@ -138,7 +138,7 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr<tflite::SubGraphT>
idx += tflite_subgraph->tensors.size();
}
const auto &tflite_tensor = tflite_subgraph->tensors[idx];
-std::unique_ptr<schema::TensorT> tensor(new schema::TensorT());
+std::unique_ptr<schema::TensorT> tensor = std::make_unique<schema::TensorT>();

tensor->format = tensorsFormat[i];
tensor->dataType = GetTfliteDataType(tflite_tensor->type);
@@ -231,7 +231,7 @@ STATUS TfliteModelParser::ConvertGroupDepthwiseOp(schema::MetaGraphT* sub_graph)
if (op->primitive->value.type == schema::PrimitiveType_DepthwiseConv2D) {
auto attr = op->primitive->value.AsDepthwiseConv2D();
if (attr->channelMultiplier > 1) {
-std::unique_ptr<schema::Conv2DT> conv_attr(new schema::Conv2DT);
+std::unique_ptr<schema::Conv2DT> conv_attr = std::make_unique<schema::Conv2DT>();
// get channel attr
if (op->inputIndex.empty()) {
MS_LOG(ERROR) << "the input of DepthwiseConv2D is null";
@@ -298,7 +298,7 @@ STATUS TfliteModelParser::ConvertGroupDepthwiseOp(schema::MetaGraphT* sub_graph)
MetaGraphT *TfliteModelParser::Parse(const std::string &model_file,
const std::string &weight_file,
const QuantType &quant_type) {
-std::unique_ptr<schema::MetaGraphT> sub_graph(new schema::MetaGraphT);
+std::unique_ptr<schema::MetaGraphT> sub_graph = std::make_unique<schema::MetaGraphT>();
sub_graph->name = "MS_model converted by TF-Lite";

// load graph

