
!10328 [MSLITE][Develop] Keep constant input tensor

From: @sunsuodong
tags/v1.1.0
Committed by mindspore-ci-bot, 5 years ago · parent commit 3ef938b2a7
8 changed files with 42 additions and 17 deletions:
  1. mindspore/lite/schema/ops.fbs (+2 −2)
  2. mindspore/lite/src/ops/conv2d_grad_filter.cc (+3 −3)
  3. mindspore/lite/src/ops/conv2d_grad_input.cc (+3 −3)
  4. mindspore/lite/src/ops/gather.cc (+2 −2)
  5. mindspore/lite/src/ops/group_conv2d_grad_input.cc (+2 −2)
  6. mindspore/lite/test/models_mindspore.cfg (+1 −1)
  7. mindspore/lite/tools/anf_exporter/anf_exporter.cc (+28 −2)
  8. mindspore/lite/tools/converter/legacy_optimizer/graph/infer_quant_param_pass.cc (+1 −2)

mindspore/lite/schema/ops.fbs (+2 −2)

@@ -244,7 +244,7 @@ table Conv2DGradFilter {
     dilateW: int;
     dilateH: int;
     hasBias: bool = false; // DEPRECATED
-    filter_shape: [int];
+    filter_shape: [int]; // DEPRECATED
     activationType: ActivationType = 0;
 }

@@ -265,7 +265,7 @@ table Conv2DGradInput {
     dilateW: int;
     dilateH: int;
     hasBias: bool = false; // DEPRECATED
-    input_shape: [int];
+    input_shape: [int]; // DEPRECATED
     activationType: ActivationType = 0;
 }
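Note: both shape attributes above are retired because, after this change, the shape travels through the graph as a constant input tensor that the converter keeps (the "constant input tensor" of the PR title). A minimal sketch of how a consumer could read such a 1-D int32 shape tensor, assuming lite::Tensor's data_c() and ElementsNum() accessors; the helper name ReadShapeTensor is hypothetical:

    // Hypothetical helper, not part of this patch.
    #include <cstdint>
    #include <vector>
    #include "src/tensor.h"  // assumption: defines mindspore::lite::Tensor

    std::vector<int> ReadShapeTensor(mindspore::lite::Tensor *t) {
      std::vector<int> shape;
      if (t == nullptr || t->data_c() == nullptr) {
        return shape;  // tensor has no data yet; caller should defer
      }
      auto *dims = reinterpret_cast<const int32_t *>(t->data_c());
      for (int i = 0; i < t->ElementsNum(); ++i) {
        shape.push_back(dims[i]);  // copy each dimension out of the tensor
      }
      return shape;
    }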



mindspore/lite/src/ops/conv2d_grad_filter.cc (+3 −3)

@@ -219,11 +219,11 @@ Registry conv2DGradFilterRegistry(schema::PrimitiveType_Conv2DGradFilter, Conv2D
 #endif
 
 int Conv2DGradFilter::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
-  if (2 != inputs.size()) {
-    MS_LOG(ERROR) << "Conv2d Grad Filter should have 2 inputs, but it got " << inputs.size();
+  if (inputs.size() < 2) {
+    MS_LOG(ERROR) << "Conv2d Grad Filter should have at least two inputs, but it got " << inputs.size();
     return RET_ERROR;
   }
-  if (1 != outputs.size()) {
+  if (outputs.size() != 1) {
     MS_LOG(ERROR) << "Conv2d Grad Filter should have one output but it got " << outputs.size();
     return RET_ERROR;
   }
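Relaxing the check from exactly two inputs to at least two is what lets the constant shape tensor ride along as an extra input. A hedged sketch of how the rest of InferShape could consume it; the index 2, RET_INFER_INVALID, and set_shape usage are assumptions, not taken from this patch:

    // Hedged sketch: prefer a constant third input over the deprecated
    // filter_shape attribute when fixing the output shape.
    if (inputs.size() > 2) {
      auto *shape_tensor = inputs.at(2);
      if (shape_tensor->data_c() == nullptr) {
        return RET_INFER_INVALID;  // shape not yet known; defer inference
      }
      auto *dims = reinterpret_cast<const int32_t *>(shape_tensor->data_c());
      std::vector<int> out_shape(dims, dims + shape_tensor->ElementsNum());
      outputs.at(0)->set_shape(out_shape);
      return RET_OK;
    }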


mindspore/lite/src/ops/conv2d_grad_input.cc (+3 −3)

@@ -220,11 +220,11 @@ Registry Conv2DGradInputRegistry(schema::PrimitiveType_Conv2DGradInput, Conv2DGr
 #endif
 
 int Conv2DGradInput::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
-  if (2 != inputs.size()) {
-    MS_LOG(ERROR) << "Conv2d Grad Input should have 2 inputs";
+  if (inputs.size() < 2) {
+    MS_LOG(ERROR) << "Conv2d Grad Input should have at least two inputs";
     return RET_ERROR;
   }
-  if (1 != outputs.size()) {
+  if (outputs.size() != 1) {
     MS_LOG(ERROR) << "Conv2d Grad Input should have one output";
     return RET_ERROR;
   }


mindspore/lite/src/ops/gather.cc (+2 −2)

@@ -98,8 +98,8 @@ Registry GatherRegistry(schema::PrimitiveType_Gather, GatherCreator);
 
 int Gather::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
   MS_ASSERT(this->primitive_ != nullptr);
-  if (inputs_.size() != kDoubleNum) {
-    MS_LOG(DEBUG) << "Gather should have two inputs";
+  if (inputs_.size() < kDoubleNum) {
+    MS_LOG(DEBUG) << "Gather should have at least two inputs";
   }
   if (outputs_.size() != kSingleNum) {
     MS_LOG(ERROR) << "Gather should have one output";


mindspore/lite/src/ops/group_conv2d_grad_input.cc (+2 −2)

@@ -146,8 +146,8 @@ Registry GroupConv2DGradInputRegistry(schema::PrimitiveType_GroupConv2DGradInput
 #endif
 
 int GroupConv2DGradInput::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
-  if (2 != inputs.size()) {
-    MS_LOG(ERROR) << "Conv2d Grad input should have 2 inputs";
+  if (inputs.size() < 2) {
+    MS_LOG(ERROR) << "Conv2d Grad input should have at least two inputs";
     return RET_ERROR;
   }
   if (1 != outputs.size()) {


mindspore/lite/test/models_mindspore.cfg (+1 −1)

@@ -1,6 +1,6 @@
 ssd.mindir 1.5
 mobilenetv2_438.mindir 1.5
-gate_u_net_small-1_110.mindir 1.5
+#gate_u_net_small-1_110.mindir 1.5
 shufflenetv2.mindir 1.5
 #inceptionv3.mindir 1.5
 googlenet.mindir 1.5


mindspore/lite/tools/anf_exporter/anf_exporter.cc (+28 −2)

@@ -571,8 +571,34 @@ int AnfExporter::ConvertInputValueNode(const std::shared_ptr<AnfNode> &input_ano
     output_cnode->inputIndex.emplace_back(meta_graphT->allTensors.size());
     meta_graphT->allTensors.emplace_back(std::move(paramTensor));
   } else if (value->isa<mindspore::ValueSequeue>()) {
-    MS_LOG(DEBUG) << "Value type is ValueSequence.";
-    return RET_OK;
+    auto valueAbstract = valueNode->abstract();
+    auto abstractSequnce = utils::cast<abstract::AbstractSequeuePtr>(valueAbstract);
+    if (abstractSequnce->isa<abstract::AbstractTuple>()) {
+      auto abstractTuple = utils::cast<abstract::AbstractTuplePtr>(valueAbstract);
+      auto x_shape_data = abstractTuple->elements();
+      std::vector<int32_t> shape;
+      for (std::size_t i = 0; i < abstractTuple->size(); ++i) {
+        auto value_track = x_shape_data[i]->GetValueTrack();
+        MS_ASSERT(value_track != nullptr);
+        if (value_track->isa<Int32Imm>()) {
+          shape.push_back((GetValue<int>(value_track)));
+        } else if (value_track->isa<Int64Imm>()) {
+          shape.push_back((GetValue<int64_t>(value_track)));
+        } else {
+          MS_LOG(ERROR) << "Value in ValueSequence is not an integer, it is " << value_track->ToString() << ".";
+          return RET_ERROR;
+        }
+      }
+      auto typePtr = abstractTuple->elements()[0]->GetTypeTrack();
+      paramTensor->dataType = kNumberTypeInt32;
+      paramTensor->dims = {static_cast<int32_t>(shape.size())};
+      paramTensor->nodeType = schema::NodeType_ValueNode;
+      paramTensor->data.resize(shape.size() * sizeof(int));
+      memcpy(paramTensor->data.data(), shape.data(), shape.size() * sizeof(int));
+      node_id_map_[valueNode->fullname_with_scope()] = meta_graphT->allTensors.size();
+      output_cnode->inputIndex.emplace_back(meta_graphT->allTensors.size());
+      meta_graphT->allTensors.emplace_back(std::move(paramTensor));
+    }
+  } else if (value->isa<mindspore::BoolImm>()) {
     auto valueAbstract = valueNode->abstract();
     auto abstractScalar = utils::cast<abstract::AbstractScalarPtr>(valueAbstract);
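This hunk is the core of the change: a ValueSequence input (typically a shape tuple) used to be silently skipped, and is now exported as a constant 1-D int32 tensor so that downstream InferShape finds it as a real input. A standalone illustration of the resulting tensor layout, mirroring the patch's own field assignments; the literal shape values and the helper name are made up:

    // Illustration only: a value tuple such as (1, 3, 224, 224) becomes a
    // 1-D int32 tensor of length 4 in the exported MetaGraph.
    #include <cstring>
    #include <vector>

    schema::TensorT MakeShapeTensor(const std::vector<int32_t> &shape) {
      schema::TensorT tensor;
      tensor.dataType = kNumberTypeInt32;                  // element type
      tensor.dims = {static_cast<int32_t>(shape.size())};  // e.g. dims = {4}
      tensor.nodeType = schema::NodeType_ValueNode;        // constant tensor
      tensor.data.resize(shape.size() * sizeof(int32_t));
      std::memcpy(tensor.data.data(), shape.data(), shape.size() * sizeof(int32_t));
      return tensor;
    }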


mindspore/lite/tools/converter/legacy_optimizer/graph/infer_quant_param_pass.cc (+1 −2)

@@ -36,8 +36,7 @@ STATUS InferQuantParamPass::Run(schema::MetaGraphT *graph) {
     if (node->quantType == schema::QuantType_AwareTraining) {
       continue;
     }
-    if (GetCNodeTType(*node) == schema::PrimitiveType_FakeQuantWithMinMax ||
-        GetCNodeTType(*node) == schema::PrimitiveType_FakeQuantWithMinMaxVars) {
+    if (GetCNodeTType(*node) == schema::PrimitiveType_FakeQuantWithMinMaxVars) {
       MS_ASSERT(false);
     }
     auto quantParamCalcer = quantParamRegister->GetQuantParamCalcer(GetCNodeTType(*node));
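With FakeQuantWithMinMax dropped from the condition, only FakeQuantWithMinMaxVars still trips the debug-only assertion, suggesting the former no longer reaches this pass after conversion. For op types with no registered calcer, a tolerant continuation like the following would be the usual pattern (a hedged sketch; the actual handling after GetQuantParamCalcer is outside this hunk):

    // Hedged sketch (assumption): skip op types that have no registered
    // quant-param calcer instead of failing the whole pass.
    if (quantParamCalcer == nullptr) {
      MS_LOG(WARNING) << "no quant param calcer for node type: "
                      << schema::EnumNamePrimitiveType(GetCNodeTType(*node));
      continue;
    }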

