From f98eedd71c6aad04b288addd2f2aff47cc674577 Mon Sep 17 00:00:00 2001
From: zhengjun10
Date: Fri, 11 Sep 2020 20:48:15 +0800
Subject: [PATCH] add mindspore model senet finetune

---
 mindspore/lite/src/ops/deconv2d.cc                 | 81 +++++++++++++++++++
 mindspore/lite/src/ops/deconv2d.h                  |  3 +
 mindspore/lite/src/ops/elu.cc                      | 22 +++++
 mindspore/lite/src/ops/elu.h                       |  2 +-
 mindspore/lite/src/ops/log.cc                      | 22 +++++
 mindspore/lite/src/ops/log.h                       |  1 +
 mindspore/lite/src/ops/primitive_c.cc              |  8 +-
 mindspore/lite/test/models_mindspore.cfg           |  1 +
 mindspore/lite/tools/common/graph_util.cc          | 10 +--
 .../graph/format_trans_pass.cc                     |  6 +-
 .../legacy_optimizer/graph/infershape_pass.cc      | 43 ++++++++--
 .../graph/weight_format_hardcode_pass.cc           |  4 +-
 .../optimizer/common/node_pass_extends.cc          | 11 ++-
 .../fusion/constant_folding_fusion.cc              |  4 +-
 14 files changed, 194 insertions(+), 24 deletions(-)

diff --git a/mindspore/lite/src/ops/deconv2d.cc b/mindspore/lite/src/ops/deconv2d.cc
index 536e380238..8f3da2bceb 100644
--- a/mindspore/lite/src/ops/deconv2d.cc
+++ b/mindspore/lite/src/ops/deconv2d.cc
@@ -15,6 +15,8 @@
  */
 
 #include "src/ops/deconv2d.h"
+#include <memory>
+#include <string>
 
 namespace mindspore {
 namespace lite {
@@ -56,7 +58,86 @@ void DeConv2D::SetHasBias(bool has_bias) { this->primitive_->value.AsDeConv2D()-
 void DeConv2D::SetActivationType(int activation_type) {
   this->primitive_->value.AsDeConv2D()->activationType = (schema::ActivationType)activation_type;
 }
 
+void DeConv2D::PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group) {
+  auto attr = std::make_unique<schema::DeConv2DT>();
+  attr->group = group;
+  auto format = GetValue<std::string>(prim.GetAttr("data_format"));
+  if (format == "NCHW") {
+    attr->format = schema::Format_NCHW;
+  } else if (format == "NHWC") {
+    attr->format = schema::Format_NHWC;
+  } else {
+    attr->format = schema::Format_NUM_OF_FORMAT;
+  }
+  auto pad_list = GetValue<std::vector<int>>(prim.GetAttr("pad_list"));
+  attr->padUp = pad_list[0];
+  attr->padDown = pad_list[1];
+  attr->padLeft = pad_list[2];
+  attr->padRight = pad_list[3];
+
+  auto dilation = GetValue<std::vector<int>>(prim.GetAttr("dilation"));
+  attr->dilateH = dilation[0];
+  attr->dilateW = dilation[1];
+
+  auto kernel_size = GetValue<std::vector<int>>(prim.GetAttr("kernel_size"));
+  attr->kernelH = kernel_size[0];
+  attr->kernelW = kernel_size[1];
+
+  auto stride = GetValue<std::vector<int>>(prim.GetAttr("stride"));
+  attr->strideH = stride[0];
+  attr->strideW = stride[1];
+
+  attr->channelOut = GetValue<int>(prim.GetAttr("out_channel"));
+
+  auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode"));
+  if (pad_mode == "valid" || pad_mode == "VALID") {
+    attr->padMode = schema::PadMode_VALID;
+  } else if (pad_mode == "same" || pad_mode == "SAME") {
+    attr->padMode = schema::PadMode_SAME;
+  } else {
+    attr->padMode = schema::PadMode_NOTSET;
+  }
+
+  if (prim.GetAttr("activation_name") != nullptr) {
+    std::string activate_name = GetValue<std::string>(prim.GetAttr("activation_name"));
+    attr->activationType = kActivationTypeMap[activate_name];
+  } else {
+    attr->activationType = schema::ActivationType_NO_ACTIVATION;
+  }
+
+  primitive->value.type = schema::PrimitiveType_DeConv2D;
+  primitive->value.value = attr.release();
+}
+int DeConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
+  if (this->primitive_ == nullptr) {
+    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
+    if (this->primitive_ == nullptr) {
+      MS_LOG(ERROR) << "new primitiveT failed";
+      return RET_ERROR;
+    }
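+    // a freshly allocated PrimitiveT carries no type yet, so tag it as DeConv2D before the type check below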
+    this->primitive_->value.type = schema::PrimitiveType_DeConv2D;
+  }
+  if (this->primitive_->value.type != schema::PrimitiveType_DeConv2D) {
+    MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type;
+    return RET_ERROR;
+  }
+  int group = GetValue<int>(prim.GetAttr("group"));
+  if (group == 1) {
+    PopulaterDeConv2DSingleGroup(prim, this->primitive_, group);
+  }
+
+  if (GetQuantType() == schema::QuantType_AwareTraining) {
+    std::vector<std::vector<schema::QuantParamT>> vecInputQuantParam;
+    std::vector<std::vector<schema::QuantParamT>> vecOutputQuantParam;
+    PopulaterQuantParam(prim, &vecInputQuantParam, &vecOutputQuantParam);
+    SetInputQuantParam(vecInputQuantParam);
+    SetOutputQuantParam(vecOutputQuantParam);
+  }
+  return RET_OK;
+}
 #else
 int DeConv2D::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
   MS_ASSERT(nullptr != primitive);
diff --git a/mindspore/lite/src/ops/deconv2d.h b/mindspore/lite/src/ops/deconv2d.h
index acde7c9527..a73dfdf337 100644
--- a/mindspore/lite/src/ops/deconv2d.h
+++ b/mindspore/lite/src/ops/deconv2d.h
@@ -48,6 +48,9 @@ class DeConv2D : public PrimitiveC {
   void SetDilateH(int dilate_h);
   void SetHasBias(bool has_bias);
   void SetActivationType(int activation_type);
+  void PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group);
+  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
+
 #else
   DeConv2D() = default;
 
diff --git a/mindspore/lite/src/ops/elu.cc b/mindspore/lite/src/ops/elu.cc
index 9a1e16991f..eefafa5bf9 100644
--- a/mindspore/lite/src/ops/elu.cc
+++ b/mindspore/lite/src/ops/elu.cc
@@ -15,6 +15,7 @@
  */
 
 #include "src/ops/elu.h"
+#include <memory>
 
 namespace mindspore {
 namespace lite {
@@ -23,6 +24,27 @@ float Elu::GetAlpha() const { return this->primitive_->value.AsElu()->alpha; }
 
 void Elu::SetAlpha(float alpha) { this->primitive_->value.AsElu()->alpha = alpha; }
 
+int Elu::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
+  if (this->primitive_ == nullptr) {
+    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
+    if (this->primitive_ == nullptr) {
+      MS_LOG(ERROR) << "new primitiveT failed";
+      return RET_ERROR;
+    }
+    this->primitive_->value.type = schema::PrimitiveType_Elu;
+  }
+  if (this->primitive_->value.type != schema::PrimitiveType_Elu) {
+    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
+    return RET_ERROR;
+  }
+  auto attr = std::make_unique<schema::EluT>();
+  this->primitive_->value.value = attr.release();
+  if (this->primitive_->value.value == nullptr) {
+    MS_LOG(ERROR) << "new primitiveT value failed";
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
 #else
 int Elu::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
   MS_ASSERT(nullptr != primitive);
diff --git a/mindspore/lite/src/ops/elu.h b/mindspore/lite/src/ops/elu.h
index e0f3a5f576..955d9c9ab1 100644
--- a/mindspore/lite/src/ops/elu.h
+++ b/mindspore/lite/src/ops/elu.h
@@ -32,7 +32,7 @@ class Elu : public PrimitiveC {
   Elu() = default;
   explicit Elu(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
   void SetAlpha(float alpha);
-
+  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
 #else
   Elu() = default;
 
diff --git a/mindspore/lite/src/ops/log.cc b/mindspore/lite/src/ops/log.cc
index f35ec426a4..6567f194f0 100644
--- a/mindspore/lite/src/ops/log.cc
+++ b/mindspore/lite/src/ops/log.cc
@@ -15,10 +15,32 @@
  */
 
 #include "src/ops/log.h"
+#include <memory>
 
 namespace mindspore {
 namespace lite {
 #ifdef PRIMITIVE_WRITEABLE
+int Log::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
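+  // allocate the schema::PrimitiveT lazily and tag it as Log; a pre-existing primitive of another type is rejected below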
+  if (this->primitive_ == nullptr) {
+    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
+    if (this->primitive_ == nullptr) {
+      MS_LOG(ERROR) << "new primitiveT failed";
+      return RET_ERROR;
+    }
+    this->primitive_->value.type = schema::PrimitiveType_Log;
+  }
+  if (this->primitive_->value.type != schema::PrimitiveType_Log) {
+    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
+    return RET_ERROR;
+  }
+  auto attr = std::make_unique<schema::LogT>();
+  this->primitive_->value.value = attr.release();
+  if (this->primitive_->value.value == nullptr) {
+    MS_LOG(ERROR) << "new primitiveT value failed";
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
 #else
 int Log::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
   MS_ASSERT(nullptr != primitive);
diff --git a/mindspore/lite/src/ops/log.h b/mindspore/lite/src/ops/log.h
index 1bbac2eba5..e96976cdf0 100644
--- a/mindspore/lite/src/ops/log.h
+++ b/mindspore/lite/src/ops/log.h
@@ -31,6 +31,7 @@ class Log : public ArithmeticSelf {
   MS_DECLARE_PARENT(Log, ArithmeticSelf);
   Log() = default;
   explicit Log(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
+  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
 #else
   Log() = default;
 
diff --git a/mindspore/lite/src/ops/primitive_c.cc b/mindspore/lite/src/ops/primitive_c.cc
index 6b48d6c5a8..0ebfe4b5bf 100644
--- a/mindspore/lite/src/ops/primitive_c.cc
+++ b/mindspore/lite/src/ops/primitive_c.cc
@@ -369,6 +369,12 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
     return NewPrimitiveC(prim, inputs, quantType);
   } else if (op_type == "Transpose") {
     return NewPrimitiveC<Transpose>(prim, inputs, quantType);
+  } else if (op_type == "Elu") {
+    return NewPrimitiveC<Elu>(prim, inputs, quantType);
+  } else if (op_type == "Log") {
+    return NewPrimitiveC<Log>(prim, inputs, quantType);
+  } else if (op_type == "Conv2DBackpropInput") {
+    return NewPrimitiveC<DeConv2D>(prim, inputs, quantType);
   } else if (op_type == "tuple_getitem") {
     return NewPrimitiveC<TupleGetItem>(prim, inputs, quantType);
   } else if (op_type == "Softmax") {
@@ -380,8 +386,6 @@ std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std:
     return NewPrimitiveC(prim, inputs, quantType);
   } else if (op_type == "Conv2DBackpropFilter") {
     return NewPrimitiveC<Conv2DGradFilter>(prim, inputs, quantType);
-  } else if (op_type == "Conv2DBackpropInput") {
-    return NewPrimitiveC<Conv2DGradInput>(prim, inputs, quantType);
   } else if (op_type == "BiasAddGrad") {
     return NewPrimitiveC<BiasGrad>(prim, inputs, quantType);
   } else if (op_type == "ApplyMomentum") {
diff --git a/mindspore/lite/test/models_mindspore.cfg b/mindspore/lite/test/models_mindspore.cfg
index 7fd93e4824..a41c9b1ba6 100644
--- a/mindspore/lite/test/models_mindspore.cfg
+++ b/mindspore/lite/test/models_mindspore.cfg
@@ -1,2 +1,3 @@
 ssd.mindir
 mobilenetv2_438.mindir
+gate_u_net_small-1_110.mindir
\ No newline at end of file
diff --git a/mindspore/lite/tools/common/graph_util.cc b/mindspore/lite/tools/common/graph_util.cc
index d8725bab9f..c6c746f739 100644
--- a/mindspore/lite/tools/common/graph_util.cc
+++ b/mindspore/lite/tools/common/graph_util.cc
@@ -403,8 +403,8 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si
     preTensor->refCount = 0;
     preTensor->data.clear();
     if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) {
-      preTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT;
-      toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT;
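+      // the node inserted before existNode consumes the original tensor (srcT) and produces the converted tensor (dstT)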
+      preTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->srcT;
+      toAddTensor->dataType = toAddNodeIn->primitive->value.AsQuantDTypeCast()->dstT;
     }
     graphT->allTensors.emplace_back(std::move(toAddTensor));
     size_t toAddTensorIdx = graphT->allTensors.size() - 1;
@@ -415,10 +415,10 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si
       return graphT->nodes.end();
     }
     toAddNode->inputIndex.clear();
-    toAddNode->inputIndex.push_back(toAddTensorIdx);
+    toAddNode->inputIndex.push_back(preTensorIdx);
     toAddNode->outputIndex.clear();
-    toAddNode->outputIndex.push_back(preTensorIdx);
-    for (auto iter = graphT->inputIndex.begin(); iter != graphT->inputIndex.end(); iter++) {
+    toAddNode->outputIndex.push_back(toAddTensorIdx);
+    for (auto iter = existNode->inputIndex.begin(); iter != existNode->inputIndex.end(); iter++) {
       if (*iter == preTensorIdx) {
         *iter = toAddTensorIdx;
         break;
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc
index 51f266ca13..5f644d998e 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc
@@ -58,6 +58,7 @@ STATUS FormatTransPass::DoModelInputFormatTrans(schema::MetaGraphT *graph) {
   }
   auto graphInputIdxes = graph->inputIndex;
   for (size_t i = 0; i < graphInputIdxes.size(); i++) {
+    bool transed = false;
     auto inputIdx = graphInputIdxes.at(i);
     MS_ASSERT(inputIdx < subGraph->allTensors.size());
     auto &tensor = graph->allTensors.at(inputIdx);
@@ -84,7 +85,10 @@ STATUS FormatTransPass::DoModelInputFormatTrans(schema::MetaGraphT *graph) {
       graphInTensor->format = schema::Format::Format_NHWC;
       // assume parser not reformat shape
       auto oldDims = graphInTensor->dims;
-      graphInTensor->dims = {oldDims[NCHW_N], oldDims[NCHW_H], oldDims[NCHW_W], oldDims[NCHW_C]};
+      if (!transed) {
+        graphInTensor->dims = {oldDims[NCHW_N], oldDims[NCHW_H], oldDims[NCHW_W], oldDims[NCHW_C]};
+        transed = true;
+      }
       break;
     }
   }
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc
index 6bb15bcbfa..93045c0156 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc
@@ -58,40 +58,72 @@ std::vector<Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::ve
   }
   return lite_tensors;
 }
+void PrintTensorShape(const std::vector<Tensor *> &input_tensors, const std::vector<Tensor *> &output_tensors) {
+  int i = 0;
+  for (auto input_tensor : input_tensors) {
+    std::ostringstream oss;
+    for (auto &dim : input_tensor->shape()) {
+      oss << " " << dim;
+    }
+    MS_LOG(DEBUG) << "input shape " << i++ << ":" << oss.str();
+  }
+  i = 0;
+  for (auto output_tensor : output_tensors) {
+    std::ostringstream oss;
+    for (auto &dim : output_tensor->shape()) {
+      oss << " " << dim;
+    }
+    MS_LOG(DEBUG) << "output shape " << i++ << ":" << oss.str();
+  }
+}
+void FreeTensors(std::vector<Tensor *> input_tensors, std::vector<Tensor *> output_tensors) {
+  for (auto &tensor : input_tensors) {
+    delete tensor;
+  }
+  for (auto &tensor : output_tensors) {
+    delete tensor;
+  }
+  input_tensors.clear();
+  input_tensors.shrink_to_fit();
+  output_tensors.clear();
+  output_tensors.shrink_to_fit();
+}
 }  // namespace
 
 STATUS InferShapePass::Run(MetaGraphT *graph) {
   MS_ASSERT(graph != nullptr);
   for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) {
     auto &node = *iter;
     auto input_tensors = ConvertTensorToLiteTensor(graph, node->inputIndex, node->primitive->value.type);
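+    // output_tensors is declared up front so that FreeTensors can release both vectors on every early-exit path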
+    std::vector<Tensor *> output_tensors;
     if (input_tensors.empty() || input_tensors.size() != node->inputIndex.size()) {
       MS_LOG(ERROR) << "convert input lite tensor error";
+      FreeTensors(input_tensors, output_tensors);
       return RET_INFER_ERR;
     }
-    auto output_tensors = ConvertTensorToLiteTensor(graph, node->outputIndex, node->primitive->value.type);
+    output_tensors = ConvertTensorToLiteTensor(graph, node->outputIndex, node->primitive->value.type);
     if (output_tensors.empty() || output_tensors.size() != node->outputIndex.size()) {
       MS_LOG(ERROR) << "convert output lite tensor error";
+      FreeTensors(input_tensors, output_tensors);
       return RET_INFER_ERR;
     }
-    std::unique_ptr<PrimitiveT> primitiveT(new (std::nothrow) PrimitiveT(*node->primitive));
+    std::unique_ptr<PrimitiveT> primitiveT(new(std::nothrow) PrimitiveT(*node->primitive));
     if (primitiveT == nullptr) {
       MS_LOG(ERROR) << "copy primitiveT error";
+      FreeTensors(input_tensors, output_tensors);
       return RET_ERROR;
     }
     auto primitiveC = std::shared_ptr<PrimitiveC>(PrimitiveC::Create(primitiveT.release()));
     if (primitiveC == nullptr) {
       MS_LOG(ERROR) << "unpack primitiveT error";
+      FreeTensors(input_tensors, output_tensors);
       return RET_ERROR;
     }
     auto ret = primitiveC->InferShape(input_tensors, output_tensors);
+    MS_LOG(DEBUG) << "cur node:" << node->name;
     if (ret == RET_INFER_INVALID) {
       MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << node->name
                    << ", type: " << schema::EnumNamePrimitiveType(node->primitive->value.type) << "flag set to false.";
     } else if (ret != RET_OK) {
       MS_LOG(WARNING) << "InferShape failed, name: " << node->name
                       << ", type: " << schema::EnumNamePrimitiveType(node->primitive->value.type);
+      FreeTensors(input_tensors, output_tensors);
       return RET_INFER_ERR;
     }
+    PrintTensorShape(input_tensors, output_tensors);
     // copy output shape to tensorT
     for (size_t i = 0; i < output_tensors.size(); i++) {
       auto output_dims = output_tensors[i]->shape();
@@ -100,12 +132,7 @@ STATUS InferShapePass::Run(MetaGraphT *graph) {
       output_tensor->format = output_tensors[i]->GetFormat();
       output_tensor->dataType = output_tensors[i]->data_type();
     }
-    for (auto input_tensor : input_tensors) {
-      delete input_tensor;
-    }
-    for (auto output_tensor : output_tensors) {
-      delete output_tensor;
-    }
+    FreeTensors(input_tensors, output_tensors);
   }
   return RET_OK;
 }
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc
index a4bea1892b..44b33c6226 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.cc
@@ -170,7 +170,9 @@ STATUS WeightFormatHardCodePass::HardCodeMS(const std::unique_ptr &node,
   if (opType == PrimitiveType_Conv2D) {
     weightTensor->format = schema::Format::Format_KCHW;
   } else if (opType == PrimitiveType_DepthwiseConv2D) {
-    weightTensor->format = schema::Format::Format_CKHW;
+    weightTensor->format = Format_CKHW;
+  } else if (opType == PrimitiveType_DeConv2D) {
+    weightTensor->format = Format_KCHW;
   } else {
     MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(opType) << ", node: " << node->name;
     return RET_ERROR;
diff --git a/mindspore/lite/tools/optimizer/common/node_pass_extends.cc b/mindspore/lite/tools/optimizer/common/node_pass_extends.cc
index 4aacd49276..4c99ed20db 100644
--- a/mindspore/lite/tools/optimizer/common/node_pass_extends.cc
+++ b/mindspore/lite/tools/optimizer/common/node_pass_extends.cc
@@ -41,12 +41,12 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) {
     if (seen_node.count(node) > 0 || !manager->all_nodes().contains(node)) {
       continue;
     }
-    (void)seen_node.insert(node);
+    (void) seen_node.insert(node);
     AnfNodePtr new_node = Run(func_graph, node);
     bool change = (new_node != nullptr);
     if (new_node != nullptr && new_node != node) {
-      (void)manager->Replace(node, new_node);
-      (void)seen_node.erase(node);
+      (void) manager->Replace(node, new_node);
+      (void) seen_node.erase(node);
     } else if (new_node == nullptr) {
       new_node = node;
     }
@@ -61,9 +61,12 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) {
       auto cnode = new_node->cast<CNodePtr>();
       MS_EXCEPTION_IF_NULL(cnode);
       auto inputs = cnode->inputs();
-      (void)to_process.insert(to_process.end(), inputs.begin(), inputs.end());
+      (void) to_process.insert(to_process.end(), inputs.begin(), inputs.end());
     }
     changes = changes || change;
+    if (change) {
+      MS_LOG(DEBUG) << "pass " << this->name() << " changed node:" << new_node->fullname_with_scope();
+    }
   }
   return changes;
 }
diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
index adb2a7f2e7..122c226083 100644
--- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc
@@ -174,7 +174,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
   CheckIfFuncGraphIsNull(func_graph);
   CheckIfAnfNodeIsNull(node);
   if (!node->isa<CNode>()) {
-    return node;
+    return nullptr;
   }
   auto any_node = node->cast<CNodePtr>();
   CheckIfCNodeIsNull(any_node);
@@ -191,7 +191,6 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
       continue;
     }
     changed = true;
-    MS_LOG(INFO) << "Begin fold node:" << input_node->fullname_with_scope();
     auto output_nums = GetOutputTensorNum(input_cnode);
     std::vector<Tensor *> output_tensors{output_nums, new Tensor()};
     auto lite_primitive = GetValueNode<std::shared_ptr<PrimitiveC>>(input_cnode->input(0));
@@ -254,6 +253,7 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
       MS_LOG(ERROR) << "constant_folding replace cnode failed";
       return nullptr;
     }
+    MS_LOG(DEBUG) << "fold node:" << input_node->fullname_with_scope() << " success";
     FreeTensors(&input_tensors, &output_tensors);
     delete (lite_kernel);
   }