
fix deconv outputPaddings bug

tags/v1.2.0-rc1
liuyu 5 years ago
commit ea54edcdba
12 changed files with 31 additions and 41 deletions
  1. mindspore/core/ops/fusion/conv2d_transpose_fusion.cc  (+9, -24)
  2. mindspore/core/ops/fusion/conv2d_transpose_fusion.h  (+3, -6)
  3. mindspore/core/ops/op_utils.h  (+1, -2)
  4. mindspore/lite/nnacl/conv_parameter.h  (+2, -2)
  5. mindspore/lite/nnacl/infer/deconv2d_infer.c  (+2, -2)
  6. mindspore/lite/schema/ops.fbs  (+1, -0)
  7. mindspore/lite/src/ops/ops_def.cc  (+1, -0)
  8. mindspore/lite/src/ops/populate/deconv2d_populate.cc  (+2, -0)
  9. mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc  (+1, -0)
  10. mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.cc  (+7, -5)
  11. mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.cc  (+1, -0)
  12. mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.cc  (+1, -0)
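
The gist of the fix, as the diffs below show: Conv2dTransposeFusion used to carry output padding as two attributes (outputPaddingH / outputPaddingW), and both setters accidentally stored their value under the kDilation key, so dilation got overwritten and the getters (which read koutputPaddingH / koutputPaddingW) returned attributes that were never set. The commit merges the pair into a single output_paddings vector stored under koutputPaddings, renames the ConvParameter fields to the trailing-underscore convention, adds the field to the flatbuffer schema, and has every converter parser initialize output_paddings (defaulting to {0, 0}) so deconv2d_populate.cc can read both entries unconditionally.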

mindspore/core/ops/fusion/conv2d_transpose_fusion.cc  (+9, -24)

@@ -23,8 +23,7 @@ void Conv2dTransposeFusion::Init(int64_t in_channel, int64_t out_channel, const
                                  int64_t mode, const PadMode &pad_mode, const std::vector<int64_t> &pad,
                                  const std::vector<int64_t> &stride, const std::vector<int64_t> &dilation,
                                  int64_t group, const Format &format, const std::vector<int64_t> &pad_list,
-                                 const std::vector<int64_t> &output_padding_h,
-                                 const std::vector<int64_t> &output_padding_w, const ActivationType activation_type) {
+                                 const std::vector<int64_t> &output_paddings, const ActivationType activation_type) {
   set_in_channel(in_channel);
   set_out_channel(out_channel);
   set_kernel_size(kernel_size);
@@ -36,8 +35,7 @@ void Conv2dTransposeFusion::Init(int64_t in_channel, int64_t out_channel, const
   set_group(group);
   set_format(format);
   set_pad_list(pad_list);
-  set_output_padding_h(output_padding_h);
-  set_output_padding_w(output_padding_w);
+  set_output_paddings(output_paddings);
   set_activation_type(activation_type);
 }


@@ -57,20 +55,12 @@ void Conv2dTransposeFusion::set_dilation(const std::vector<int64_t> &dilation) {
   AddAttr(kDilation, MakeValue(dilation));
 }

-void Conv2dTransposeFusion::set_output_padding_h(const std::vector<int64_t> &output_padding_h) {
-  CheckAndConvertUtils::CheckInteger(koutputPaddingH, output_padding_h.size(), kGreaterEqual, 1, name());
-  for (int64_t item : output_padding_h) {
-    CheckAndConvertUtils::CheckInteger(koutputPaddingH, item, kGreaterEqual, 0, name());
+void Conv2dTransposeFusion::set_output_paddings(const std::vector<int64_t> &output_paddings) {
+  CheckAndConvertUtils::CheckInteger(koutputPaddings, output_paddings.size(), kGreaterEqual, 1, name());
+  for (int64_t item : output_paddings) {
+    CheckAndConvertUtils::CheckInteger(koutputPaddings, item, kGreaterEqual, 0, name());
   }
-  AddAttr(kDilation, MakeValue(output_padding_h));
-}
-
-void Conv2dTransposeFusion::set_output_padding_w(const std::vector<int64_t> &output_padding_w) {
-  CheckAndConvertUtils::CheckInteger(koutputPaddingW, output_padding_w.size(), kGreaterEqual, 1, name());
-  for (int64_t item : output_padding_w) {
-    CheckAndConvertUtils::CheckInteger(koutputPaddingW, item, kGreaterEqual, 0, name());
-  }
-  AddAttr(kDilation, MakeValue(output_padding_w));
+  AddAttr(koutputPaddings, MakeValue(output_paddings));
 }

 void Conv2dTransposeFusion::set_activation_type(const ActivationType activation_type) {
@@ -78,13 +68,8 @@ void Conv2dTransposeFusion::set_activation_type(const ActivationType activation_
   this->AddAttr(kActivationType, MakeValue(swi));
 }

-std::vector<int64_t> Conv2dTransposeFusion::get_output_padding_h() const {
-  auto value_ptr = GetAttr(koutputPaddingH);
-  return GetValue<std::vector<int64_t>>(value_ptr);
-}
-
-std::vector<int64_t> Conv2dTransposeFusion::get_output_padding_w() const {
-  auto value_ptr = GetAttr(koutputPaddingW);
+std::vector<int64_t> Conv2dTransposeFusion::get_output_paddings() const {
+  auto value_ptr = GetAttr(koutputPaddings);
   return GetValue<std::vector<int64_t>>(value_ptr);
 }
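
For illustration, a sketch of the merged API after this change (assuming the usual default-constructible primitive pattern; this snippet is illustrative, not code from the repo):

#include <memory>
#include <vector>
#include "ops/fusion/conv2d_transpose_fusion.h"

void Example() {
  auto prim = std::make_shared<mindspore::ops::Conv2dTransposeFusion>();
  // One {h, w} vector replaces the old per-axis setters.
  prim->set_output_paddings({1, 1});
  // Now stored and read back under koutputPaddings ("output_paddings"),
  // rather than being mis-written under kDilation as before the fix.
  std::vector<int64_t> out_pads = prim->get_output_paddings();
  (void)out_pads;  // e.g. out_pads == {1, 1}
}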




mindspore/core/ops/fusion/conv2d_transpose_fusion.h  (+3, -6)

@@ -36,16 +36,13 @@ class Conv2dTransposeFusion : public Conv2dTranspose {
             const PadMode &pad_mode = VALID, const std::vector<int64_t> &pad = {0, 0, 0, 0},
             const std::vector<int64_t> &stride = {1, 1}, const std::vector<int64_t> &dilation = {1, 1},
             int64_t group = 1, const Format &format = NCHW, const std::vector<int64_t> &pad_list = {0, 0, 0, 0},
-            const std::vector<int64_t> &output_padding_h = {0}, const std::vector<int64_t> &output_padding_w = {0},
-            const ActivationType activation_type = NO_ACTIVATION);
+            const std::vector<int64_t> &output_paddings = {0}, const ActivationType activation_type = NO_ACTIVATION);
   void set_kernel_size(const std::vector<int64_t> &kernel_size);
   void set_dilation(const std::vector<int64_t> &dilation);
-  void set_output_padding_h(const std::vector<int64_t> &output_padding_h);
-  void set_output_padding_w(const std::vector<int64_t> &output_padding_w);
+  void set_output_paddings(const std::vector<int64_t> &output_paddings);
   void set_activation_type(const ActivationType activation_type);

-  std::vector<int64_t> get_output_padding_h() const;
-  std::vector<int64_t> get_output_padding_w() const;
+  std::vector<int64_t> get_output_paddings() const;
   ActivationType get_activation_type() const;
 };
 }  // namespace ops


mindspore/core/ops/op_utils.h  (+1, -2)

@@ -136,8 +136,7 @@ constexpr auto kOutChannel = "out_channel";
 constexpr auto kOutMaxValue = "out_max_value";
 constexpr auto kOutputChannel = "output_channel";
 constexpr auto kOutputNum = "output_num";
-constexpr auto koutputPaddingH = "outputPaddingH";
-constexpr auto koutputPaddingW = "outputPaddingW";
+constexpr auto koutputPaddings = "output_paddings";
 constexpr auto kOutputType = "output_type";
 constexpr auto kOutQuantized = "out_quantized";
 constexpr auto kP = "p";


mindspore/lite/nnacl/conv_parameter.h  (+2, -2)

@@ -52,8 +52,8 @@ typedef struct ConvParameter {
   PadMode pad_mode_;
   ActType act_type_;
   int channel_multiplie_;
-  int output_padding_w;
-  int output_padding_h;
+  int output_padding_w_;
+  int output_padding_h_;
 } ConvParameter;

 typedef struct SlidingWindowParam {


mindspore/lite/nnacl/infer/deconv2d_infer.c  (+2, -2)

@@ -69,8 +69,8 @@ int Deconv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
     return NNACL_ERR;
   }

-  output_h += param->output_padding_h;
-  output_w += param->output_padding_w;
+  output_h += param->output_padding_h_;
+  output_w += param->output_padding_w_;

   output->shape_size_ = 4;
   output->shape_[0] = output_n;
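
For context on the two lines above: output padding is the extra size an ONNX-style transposed convolution appends to the computed output. A minimal standalone sketch of the standard output-size formula (hypothetical helper, not the actual NNACL inference code):

#include <cstdio>

// Standard transposed-convolution output size along one axis.
static int DeconvOutputSize(int input, int kernel, int stride, int pad_total,
                            int dilation, int output_padding) {
  int effective_kernel = dilation * (kernel - 1) + 1;  // kernel extent after dilation
  // Invert the forward-conv size formula, then add the output padding this
  // commit threads from the schema into ConvParameter.
  return stride * (input - 1) + effective_kernel - pad_total + output_padding;
}

int main() {
  // 4x4 input, 3x3 kernel, stride 2, no padding, output_padding 1 -> 10
  std::printf("%d\n", DeconvOutputSize(4, 3, 2, 0, 1, 1));
  return 0;
}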


mindspore/lite/schema/ops.fbs  (+1, -0)

@@ -418,6 +418,7 @@ table Conv2dTransposeFusion {
     in_channel: long;
     out_channel: long;
     activation_type: ActivationType = 0;
+    output_paddings: [long];
 }

 table Cos {


mindspore/lite/src/ops/ops_def.cc  (+1, -0)

@@ -417,6 +417,7 @@ OP_ATTR(group, long)
 OP_ATTR(in_channel, long)
 OP_ATTR(out_channel, long)
 OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0)
+OP_ATTR(output_paddings, [long])
 OP_SCHEMA_DEF_END(Conv2dTransposeFusion)

 OP_SCHEMA_DEF(Cos)


mindspore/lite/src/ops/populate/deconv2d_populate.cc  (+2, -0)

@@ -35,6 +35,8 @@ OpParameter *PopulateDeconvParameter(const void *prim) {
   conv_param->group_ = static_cast<int>(conv_primitive->group());
   conv_param->stride_h_ = static_cast<int>(*(conv_primitive->stride()->begin()));
   conv_param->stride_w_ = static_cast<int>(*(conv_primitive->stride()->begin() + 1));
+  conv_param->output_padding_h_ = static_cast<int>(*(conv_primitive->output_paddings()->begin()));
+  conv_param->output_padding_w_ = static_cast<int>(*(conv_primitive->output_paddings()->begin() + 1));
   switch (conv_primitive->pad_mode()) {
     case schema::PadMode_SAME:
       conv_param->pad_mode_ = Pad_same;
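
Note the ordering contract this relies on: output_paddings()->begin() is read as H and begin() + 1 as W, which matches both the {0, 0} defaults the parsers set and the ints(0)/ints(1) order the ONNX parser pushes.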


mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc  (+1, -0)

@@ -27,6 +27,7 @@ ops::PrimitiveC *CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &pr
   prim->set_pad({0, 0, 0, 0});
   prim->set_format(mindspore::Format::NCHW);
   prim->set_pad_mode(mindspore::PadMode::PAD);
+  prim->set_output_paddings({0, 0});

   const caffe::ConvolutionParameter &convParam = proto.convolution_param();
   // parse pad
// parse pad // parse pad


mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.cc  (+7, -5)

@@ -28,7 +28,7 @@ ops::PrimitiveC *OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, con

   prim->set_pad({0, 0, 0, 0});
   mindspore::PadMode pad_mode = mindspore::PadMode::PAD;
-  std::vector<int64_t> kernel, dilate, stride, pads, output_padding_h, output_padding_w;
+  std::vector<int64_t> kernel, dilate, stride, pads, output_paddings;
   int64_t group = 1;
   for (const auto &onnx_node_attr : onnx_node.attribute()) {
     if (onnx_node_attr.name() == "group") {
@@ -40,10 +40,9 @@ ops::PrimitiveC *OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, con
       return nullptr;
     }
     if (onnx_node_attr.name() == "output_padding") {
-      output_padding_h.push_back(static_cast<int32_t>(onnx_node_attr.ints(0)));
-      output_padding_w.push_back(static_cast<int32_t>(onnx_node_attr.ints(1)));
-      prim->set_output_padding_h(output_padding_h);
-      prim->set_output_padding_w(output_padding_w);
+      output_paddings.push_back(static_cast<int32_t>(onnx_node_attr.ints(0)));
+      output_paddings.push_back(static_cast<int32_t>(onnx_node_attr.ints(1)));
+      prim->set_output_paddings(output_paddings);
     }
   }
   prim->set_format(mindspore::Format::NCHW);
@@ -65,6 +64,9 @@ ops::PrimitiveC *OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, con
   if (!stride.empty()) {
     prim->set_stride(stride);
   }
+  if (output_paddings.empty()) {
+    prim->set_output_paddings({0, 0});
+  }

   const auto &onnx_conv_weight = onnx_node.input(1);
   auto node_iter =


mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.cc  (+1, -0)

@@ -33,6 +33,7 @@ ops::PrimitiveC *TFDeconvParser::Parse(const tensorflow::NodeDef &tf_op,
   prim->set_pad({0, 0, 0, 0});
   auto format = TensorFlowUtils::ParseNodeFormat(tf_op);
   prim->set_format(format);
+  prim->set_output_paddings({0, 0});

   std::vector<int64_t> dilations(2);
   if (ParseDilations(tf_op, format, &dilations) != RET_OK) {


mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.cc  (+1, -0)

@@ -30,6 +30,7 @@ ops::PrimitiveC *TfliteDeConvParser::Parse(const std::unique_ptr<tflite::Operato
   prim->set_format(mindspore::Format::NHWC);
   prim->set_activation_type(mindspore::ActivationType::NO_ACTIVATION);
   prim->set_dilation({1, 1});
+  prim->set_output_paddings({0, 0});

   MS_ASSERT(tflite_op != nullptr);
   MS_ASSERT(tflite_model != nullptr);

