Browse Source

[MSLITE][Develop] return RET_INFER_INVALID when infer_flag is false for op infershape

tags/v1.1.0
yangruoqi713 5 years ago
parent
commit
4fc003bfae
72 changed files with 72 additions and 72 deletions
  1. +1
    -1
      mindspore/lite/src/ops/addn.cc
  2. +1
    -1
      mindspore/lite/src/ops/argmax.cc
  3. +1
    -1
      mindspore/lite/src/ops/argmin.cc
  4. +1
    -1
      mindspore/lite/src/ops/arithmetic.cc
  5. +1
    -1
      mindspore/lite/src/ops/arithmetic_self.cc
  6. +1
    -1
      mindspore/lite/src/ops/audio_spectrogram.cc
  7. +1
    -1
      mindspore/lite/src/ops/batch_to_space.cc
  8. +1
    -1
      mindspore/lite/src/ops/broadcast_to.cc
  9. +1
    -1
      mindspore/lite/src/ops/cast.cc
  10. +1
    -1
      mindspore/lite/src/ops/concat.cc
  11. +1
    -1
      mindspore/lite/src/ops/constant_of_shape.cc
  12. +1
    -1
      mindspore/lite/src/ops/conv2d.cc
  13. +1
    -1
      mindspore/lite/src/ops/crop.cc
  14. +1
    -1
      mindspore/lite/src/ops/deconv2d.cc
  15. +1
    -1
      mindspore/lite/src/ops/dedepthwise_conv2d.cc
  16. +1
    -1
      mindspore/lite/src/ops/depth_to_space.cc
  17. +1
    -1
      mindspore/lite/src/ops/depthwise_conv2d.cc
  18. +1
    -1
      mindspore/lite/src/ops/detection_post_process.cc
  19. +1
    -1
      mindspore/lite/src/ops/dropout.cc
  20. +1
    -1
      mindspore/lite/src/ops/dropout_grad.cc
  21. +1
    -1
      mindspore/lite/src/ops/embedding_lookup.cc
  22. +1
    -1
      mindspore/lite/src/ops/expand_dims.cc
  23. +1
    -1
      mindspore/lite/src/ops/fft_imag.cc
  24. +1
    -1
      mindspore/lite/src/ops/fft_real.cc
  25. +1
    -1
      mindspore/lite/src/ops/fill.cc
  26. +1
    -1
      mindspore/lite/src/ops/flatten.cc
  27. +1
    -1
      mindspore/lite/src/ops/flatten_grad.cc
  28. +1
    -1
      mindspore/lite/src/ops/full_connection.cc
  29. +1
    -1
      mindspore/lite/src/ops/gather.cc
  30. +1
    -1
      mindspore/lite/src/ops/gather_nd.cc
  31. +1
    -1
      mindspore/lite/src/ops/layer_norm.cc
  32. +1
    -1
      mindspore/lite/src/ops/lstm.cc
  33. +1
    -1
      mindspore/lite/src/ops/matmul.cc
  34. +1
    -1
      mindspore/lite/src/ops/maximum_grad.cc
  35. +1
    -1
      mindspore/lite/src/ops/mean.cc
  36. +1
    -1
      mindspore/lite/src/ops/mfcc.cc
  37. +1
    -1
      mindspore/lite/src/ops/nchw2nhwc.cc
  38. +1
    -1
      mindspore/lite/src/ops/nhwc2nchw.cc
  39. +1
    -1
      mindspore/lite/src/ops/one_hot.cc
  40. +1
    -1
      mindspore/lite/src/ops/pad.cc
  41. +1
    -1
      mindspore/lite/src/ops/pooling.cc
  42. +1
    -1
      mindspore/lite/src/ops/power.cc
  43. +1
    -1
      mindspore/lite/src/ops/prior_box.cc
  44. +1
    -1
      mindspore/lite/src/ops/quant_dtype_cast.cc
  45. +1
    -1
      mindspore/lite/src/ops/range.cc
  46. +1
    -1
      mindspore/lite/src/ops/rank.cc
  47. +1
    -1
      mindspore/lite/src/ops/reduce.cc
  48. +1
    -1
      mindspore/lite/src/ops/reshape.cc
  49. +1
    -1
      mindspore/lite/src/ops/resize.cc
  50. +1
    -1
      mindspore/lite/src/ops/return.cc
  51. +1
    -1
      mindspore/lite/src/ops/reverse_sequence.cc
  52. +1
    -1
      mindspore/lite/src/ops/rfft.cc
  53. +1
    -1
      mindspore/lite/src/ops/roi_pooling.cc
  54. +1
    -1
      mindspore/lite/src/ops/scatter_nd.cc
  55. +1
    -1
      mindspore/lite/src/ops/shape.cc
  56. +1
    -1
      mindspore/lite/src/ops/slice.cc
  57. +1
    -1
      mindspore/lite/src/ops/softmax.cc
  58. +1
    -1
      mindspore/lite/src/ops/space_to_batch.cc
  59. +1
    -1
      mindspore/lite/src/ops/space_to_batch_nd.cc
  60. +1
    -1
      mindspore/lite/src/ops/space_to_depth.cc
  61. +1
    -1
      mindspore/lite/src/ops/sparse_to_dense.cc
  62. +1
    -1
      mindspore/lite/src/ops/split.cc
  63. +1
    -1
      mindspore/lite/src/ops/squeeze.cc
  64. +1
    -1
      mindspore/lite/src/ops/stack.cc
  65. +1
    -1
      mindspore/lite/src/ops/tile.cc
  66. +1
    -1
      mindspore/lite/src/ops/topk.cc
  67. +1
    -1
      mindspore/lite/src/ops/transpose.cc
  68. +1
    -1
      mindspore/lite/src/ops/unique.cc
  69. +1
    -1
      mindspore/lite/src/ops/unsqueeze.cc
  70. +1
    -1
      mindspore/lite/src/ops/unstack.cc
  71. +1
    -1
      mindspore/lite/src/ops/where.cc
  72. +1
    -1
      mindspore/lite/src/ops/zeros_like.cc

+ 1
- 1
mindspore/lite/src/ops/addn.cc View File

@@ -86,7 +86,7 @@ int AddN::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs
output->set_format(input->format());
output->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
output->set_shape(input->shape());



+ 1
- 1
mindspore/lite/src/ops/argmax.cc View File

@@ -74,7 +74,7 @@ int ArgMax::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
output->set_format(input->format());
output->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
std::vector<int> output_shape(input->shape());
auto input_shape_size = input->shape().size();


+ 1
- 1
mindspore/lite/src/ops/argmin.cc View File

@@ -72,7 +72,7 @@ int ArgMin::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Te
output->set_format(input->format());
output->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape_size = input->shape().size();
auto axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis();


+ 1
- 1
mindspore/lite/src/ops/arithmetic.cc View File

@@ -45,7 +45,7 @@ int Arithmetic::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite
output->set_format(format);
output->set_data_type(input0->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
if (input_shape0.size() > 10 || input_shape1.size() > 10) {
int wrong_dim = input_shape0.size() > input_shape1.size() ? input_shape0.size() : input_shape1.size();


+ 1
- 1
mindspore/lite/src/ops/arithmetic_self.cc View File

@@ -33,7 +33,7 @@ int ArithmeticSelf::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor
output->set_format(input->format());
output->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
output->set_shape(input->shape());
return RET_OK;


+ 1
- 1
mindspore/lite/src/ops/audio_spectrogram.cc View File

@@ -77,7 +77,7 @@ int AudioSpectrogram::InferShape(std::vector<Tensor *> inputs_, std::vector<Tens
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
if (input_shape.size() != 2) {


+ 1
- 1
mindspore/lite/src/ops/batch_to_space.cc View File

@@ -98,7 +98,7 @@ int BatchToSpace::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lit
outputs[0]->set_format(input->format());
outputs[0]->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
if (input_shape.size() != kDimension_4d) {


+ 1
- 1
mindspore/lite/src/ops/broadcast_to.cc View File

@@ -80,7 +80,7 @@ int BroadcastTo::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *>
outputs[0]->set_format(input->format());
outputs[0]->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
std::vector<int32_t> dst_shape(GetDstShape());
for (size_t i = 0; i < dst_shape.size(); ++i) {


+ 1
- 1
mindspore/lite/src/ops/cast.cc View File

@@ -93,7 +93,7 @@ int Cast::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output

output->set_data_type(static_cast<TypeId>(GetDstT()));
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

if (GetSrcT() != 0 && input->data_type() != GetSrcT()) {


+ 1
- 1
mindspore/lite/src/ops/concat.cc View File

@@ -98,7 +98,7 @@ int Concat::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
output->set_data_type(input0->data_type());
output->set_format(input0->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

auto input0_shape = inputs_.at(0)->shape();


+ 1
- 1
mindspore/lite/src/ops/constant_of_shape.cc View File

@@ -83,7 +83,7 @@ int ConstantOfShape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tenso
out_tensor->set_data_type(static_cast<TypeId>(GetDataType()));
out_tensor->set_format(in_tensor->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto in_data = reinterpret_cast<int *>(in_tensor->data_c());
if (in_data == nullptr) {


+ 1
- 1
mindspore/lite/src/ops/conv2d.cc View File

@@ -391,7 +391,7 @@ int Conv2D::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
pad_r_ = GetPadRight();

if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto in_shape = input_tensor->shape();
int input_h = in_shape.at(1);


+ 1
- 1
mindspore/lite/src/ops/crop.cc View File

@@ -71,7 +71,7 @@ int Crop::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs
outputs[0]->set_format(inputs[0]->format());
outputs[0]->set_data_type(inputs[0]->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
outputs[0]->set_shape(inputs[1]->shape());
return RET_OK;


+ 1
- 1
mindspore/lite/src/ops/deconv2d.cc View File

@@ -317,7 +317,7 @@ int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::
output->set_format(input->format());
output->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
int32_t input_h = input->Height();
int32_t input_w = input->Width();


+ 1
- 1
mindspore/lite/src/ops/dedepthwise_conv2d.cc View File

@@ -138,7 +138,7 @@ int DeDepthwiseConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vect
output->set_format(input->format());
output->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto in_shape = input->shape();
int input_h = in_shape.at(1);


+ 1
- 1
mindspore/lite/src/ops/depth_to_space.cc View File

@@ -73,7 +73,7 @@ int DepthToSpace::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lit
outputs[0]->set_data_type(input->data_type());
outputs[0]->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
if (input_shape.size() != kDimension_4d) {


+ 1
- 1
mindspore/lite/src/ops/depthwise_conv2d.cc View File

@@ -219,7 +219,7 @@ int DepthwiseConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector
pad_r_ = GetPadRight();

if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto in_shape = input->shape();
int input_h = in_shape.at(1);


+ 1
- 1
mindspore/lite/src/ops/detection_post_process.cc View File

@@ -190,7 +190,7 @@ int DetectionPostProcess::InferShape(std::vector<lite::Tensor *> inputs_, std::v
num_det->set_format(boxes->format());
num_det->set_data_type(kNumberTypeFloat32);
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
const auto max_detections = GetMaxDetections();
const auto max_classes_per_detection = GetMaxClassesPerDetection();


+ 1
- 1
mindspore/lite/src/ops/dropout.cc View File

@@ -84,7 +84,7 @@ int Dropout::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
auto output0 = outputs_.front();
MS_ASSERT(output0 != nullptr);
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
output0->set_shape(input->shape());
output0->set_data_type(input->data_type());


+ 1
- 1
mindspore/lite/src/ops/dropout_grad.cc View File

@@ -87,7 +87,7 @@ int DropoutGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *>
auto output = outputs_.front();
MS_ASSERT(output != nullptr);
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
output->set_shape(input->shape());
output->set_data_type(input->data_type());


+ 1
- 1
mindspore/lite/src/ops/embedding_lookup.cc View File

@@ -70,7 +70,7 @@ int EmbeddingLookup::InferShape(std::vector<Tensor *> inputs_, std::vector<Tenso
output->set_format(params_->format());
output->set_data_type(params_->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

auto embedding_shape = params_->shape();


+ 1
- 1
mindspore/lite/src/ops/expand_dims.cc View File

@@ -103,7 +103,7 @@ int ExpandDims::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *>
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
int dim = GetDim();
if (dim < 0) {


+ 1
- 1
mindspore/lite/src/ops/fft_imag.cc View File

@@ -43,7 +43,7 @@ int FftImag::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
output->set_data_type(TypeId::kNumberTypeFloat32);
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
input_shape.pop_back();


+ 1
- 1
mindspore/lite/src/ops/fft_real.cc View File

@@ -43,7 +43,7 @@ int FftReal::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
output->set_data_type(TypeId::kNumberTypeFloat32);
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
input_shape.pop_back();


+ 1
- 1
mindspore/lite/src/ops/fill.cc View File

@@ -71,7 +71,7 @@ int Fill::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

std::vector<int> output_shape;


+ 1
- 1
mindspore/lite/src/ops/flatten.cc View File

@@ -40,7 +40,7 @@ int Flatten::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

auto input_shape = input->shape();


+ 1
- 1
mindspore/lite/src/ops/flatten_grad.cc View File

@@ -39,7 +39,7 @@ int FlattenGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *>
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

auto input_shape = input->shape();


+ 1
- 1
mindspore/lite/src/ops/full_connection.cc View File

@@ -70,7 +70,7 @@ int FullConnection::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<
auto output = outputs_.front();
MS_ASSERT(output != nullptr);
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
if ((GetHasBias() && inputs_.size() != kMultiNum) || (!GetHasBias() && inputs_.size() != kDoubleNum)) {
MS_LOG(ERROR) << "Input tensors num error";


+ 1
- 1
mindspore/lite/src/ops/gather.cc View File

@@ -114,7 +114,7 @@ int Gather::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

int axis = GetAxis();


+ 1
- 1
mindspore/lite/src/ops/gather_nd.cc View File

@@ -66,7 +66,7 @@ int GatherNd::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> ou
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto in_shape = input->shape();
int in_rank = in_shape.size();


+ 1
- 1
mindspore/lite/src/ops/layer_norm.cc View File

@@ -103,7 +103,7 @@ int LayerNorm::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite:
}
}
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

output->set_shape(input_shape);


+ 1
- 1
mindspore/lite/src/ops/lstm.cc View File

@@ -68,7 +68,7 @@ int Lstm::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
outputs_.at(i)->set_format(input->format());
}
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

std::vector<int> in_shape = input->shape();


+ 1
- 1
mindspore/lite/src/ops/matmul.cc View File

@@ -101,7 +101,7 @@ int MatMul::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
output->set_data_type(input0->data_type());
output->set_format(input0->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

std::vector<int> a_shape = input0->shape();


+ 1
- 1
mindspore/lite/src/ops/maximum_grad.cc View File

@@ -93,7 +93,7 @@ int MaximumGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *>
MS_ASSERT(dx1 != nullptr);
MS_ASSERT(dx2 != nullptr);
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

auto inShape0 = x1->shape();


+ 1
- 1
mindspore/lite/src/ops/mean.cc View File

@@ -77,7 +77,7 @@ int Mean::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
if (this->primitive_ == nullptr) {
return RET_NULL_PTR;


+ 1
- 1
mindspore/lite/src/ops/mfcc.cc View File

@@ -59,7 +59,7 @@ int Mfcc::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
if (input_shape.size() != 3) {


+ 1
- 1
mindspore/lite/src/ops/nchw2nhwc.cc View File

@@ -48,7 +48,7 @@ int Nchw2Nhwc::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite:
output->set_format(schema::Format::Format_NHWC);
output->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
std::vector<int> nchw_shape = input->shape();
if (nchw_shape.size() != 4) {


+ 1
- 1
mindspore/lite/src/ops/nhwc2nchw.cc View File

@@ -49,7 +49,7 @@ int Nhwc2Nchw::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite:
output->set_format(schema::Format::Format_NCHW);
output->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
std::vector<int> nhwc_shape = input->shape();
if (nhwc_shape.size() != 4) {


+ 1
- 1
mindspore/lite/src/ops/one_hot.cc View File

@@ -117,7 +117,7 @@ int OneHot::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outpu
output->set_data_type(on_value->data_type());
output->set_format(on_value->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
const auto input_shape = input->shape();
int input_rank = static_cast<int>(input_shape.size());


+ 1
- 1
mindspore/lite/src/ops/pad.cc View File

@@ -83,7 +83,7 @@ int Pad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs)
output->set_format(input->format());
output->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

std::vector<int> paddings;


+ 1
- 1
mindspore/lite/src/ops/pooling.cc View File

@@ -181,7 +181,7 @@ int Pooling::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
output->set_data_type(input->data_type());
output->set_format(schema::Format::Format_NHWC);
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
int input_h = input->shape().at(1);
int input_w = input->shape().at(2);


+ 1
- 1
mindspore/lite/src/ops/power.cc View File

@@ -115,7 +115,7 @@ int Power::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> output
output_tensor->set_data_type(x_tensor->data_type());
output_tensor->set_format(x_tensor->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
if (exp_tensor != nullptr) {
if ((exp_tensor->shape().size() > 1 && exp_tensor->shape() != x_tensor->shape()) ||


+ 1
- 1
mindspore/lite/src/ops/prior_box.cc View File

@@ -140,7 +140,7 @@ int PriorBox::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> ou
output->set_data_type(kNumberTypeFloat32);
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
std::vector<float> different_aspect_ratios{1.0f};
auto aspect_ratios = GetAspectRatios();


+ 1
- 1
mindspore/lite/src/ops/quant_dtype_cast.cc View File

@@ -63,7 +63,7 @@ int QuantDTypeCast::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor
output->set_data_type(static_cast<TypeId>(GetDstT()));
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
output->set_shape(input->shape());
return RET_OK;


+ 1
- 1
mindspore/lite/src/ops/range.cc View File

@@ -71,7 +71,7 @@ int Range::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outpu
}
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

int shape_size = 0;


+ 1
- 1
mindspore/lite/src/ops/rank.cc View File

@@ -45,7 +45,7 @@ int Rank::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
std::vector<int> in_shape(1, 1);
output->set_shape(in_shape);


+ 1
- 1
mindspore/lite/src/ops/reduce.cc View File

@@ -157,7 +157,7 @@ int Reduce::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
if (this->primitive_ == nullptr) {
return RET_NULL_PTR;


+ 1
- 1
mindspore/lite/src/ops/reshape.cc View File

@@ -173,7 +173,7 @@ int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

std::vector<int> out_shape;


+ 1
- 1
mindspore/lite/src/ops/resize.cc View File

@@ -131,7 +131,7 @@ int Resize::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Te
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

std::vector<int> output_shape;


+ 1
- 1
mindspore/lite/src/ops/return.cc View File

@@ -72,7 +72,7 @@ int Return::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
if (this->primitive_ == nullptr) {
return RET_NULL_PTR;


+ 1
- 1
mindspore/lite/src/ops/reverse_sequence.cc View File

@@ -66,7 +66,7 @@ int ReverseSequence::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
output->set_shape(input->shape());
return RET_OK;


+ 1
- 1
mindspore/lite/src/ops/rfft.cc View File

@@ -54,7 +54,7 @@ int Rfft::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
output->set_data_type(TypeId::kNumberTypeComplex64);
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
input_shape[input_shape.size() - 1] = GetFftLength() / 2 + 1;


+ 1
- 1
mindspore/lite/src/ops/roi_pooling.cc View File

@@ -79,7 +79,7 @@ int ROIPooling::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *>
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

auto new_h = GetPooledH();


+ 1
- 1
mindspore/lite/src/ops/scatter_nd.cc View File

@@ -62,7 +62,7 @@ int ScatterND::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> o
output->set_data_type(update->data_type());
output->set_format(update->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto shape_data = reinterpret_cast<int *>(shape->MutableData());
std::vector<int> out_shape(shape_data, shape_data + shape->ElementsNum());


+ 1
- 1
mindspore/lite/src/ops/shape.cc View File

@@ -44,7 +44,7 @@ int Shape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outpu
out_tensor->set_data_type(kNumberTypeInt32);
out_tensor->set_format(schema::Format::Format_NHWC);
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
std::vector<int> out_shape;
out_shape.push_back(static_cast<int>(in_tensor->shape().size()));


+ 1
- 1
mindspore/lite/src/ops/slice.cc View File

@@ -175,7 +175,7 @@ int Slice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tens
outputs[0]->set_data_type(input->data_type());
outputs[0]->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
std::vector<int32_t> slice_begin(GetBegin());


+ 1
- 1
mindspore/lite/src/ops/softmax.cc View File

@@ -86,7 +86,7 @@ int SoftMax::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
if (input->shape().size() > 5) {
MS_LOG(ERROR) << "Softmax input dim must be less than 5, get " << input->shape().size();


+ 1
- 1
mindspore/lite/src/ops/space_to_batch.cc View File

@@ -97,7 +97,7 @@ int SpaceToBatch::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lit
outputs[0]->set_data_type(input->data_type());
outputs[0]->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
if (input_shape.size() != kDimension_4d) {


+ 1
- 1
mindspore/lite/src/ops/space_to_batch_nd.cc View File

@@ -99,7 +99,7 @@ int SpaceToBatchND::InferShape(std::vector<lite::Tensor *> inputs, std::vector<l
outputs[0]->set_data_type(input->data_type());
outputs[0]->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
if (input_shape.size() != kDimension_4d) {


+ 1
- 1
mindspore/lite/src/ops/space_to_depth.cc View File

@@ -74,7 +74,7 @@ int SpaceToDepth::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lit
outputs[0]->set_format(input->format());
outputs[0]->set_data_type(input->data_type());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();
if (input_shape.size() != kDimension_4d) {


+ 1
- 1
mindspore/lite/src/ops/sparse_to_dense.cc View File

@@ -64,7 +64,7 @@ int SparseToDense::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor
outputs_[0]->set_format(input2->format());

if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
if (this->primitive_ == nullptr) {
return RET_NULL_PTR;


+ 1
- 1
mindspore/lite/src/ops/split.cc View File

@@ -123,7 +123,7 @@ int Split::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outpu
outputs_[i]->set_format(input->format());
}
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
size_t split_dim = GetSplitDim() == -1 ? input->shape().size() - 1 : GetSplitDim();
std::vector<int> input_shape = input->shape();


+ 1
- 1
mindspore/lite/src/ops/squeeze.cc View File

@@ -105,7 +105,7 @@ int Squeeze::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> out
outputs_.front()->set_data_type(in_tensor->data_type());
outputs_.front()->set_format(in_tensor->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto in_shape = in_tensor->shape();
std::vector<int> out_shape;


+ 1
- 1
mindspore/lite/src/ops/stack.cc View File

@@ -83,7 +83,7 @@ int Stack::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> output
outputs[0]->set_data_type(input0_data_type);
outputs[0]->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto input_shape = input->shape();



+ 1
- 1
mindspore/lite/src/ops/tile.cc View File

@@ -135,7 +135,7 @@ int Tile::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

std::vector<int> out_shape;


+ 1
- 1
mindspore/lite/src/ops/topk.cc View File

@@ -69,7 +69,7 @@ int TopK::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> output
output1->set_data_type(kNumberTypeInt32);
output1->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
auto out_shape = input->shape();
out_shape[out_shape.size() - 1] = GetK();


+ 1
- 1
mindspore/lite/src/ops/transpose.cc View File

@@ -122,7 +122,7 @@ int Transpose::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> o
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
MS_ASSERT(inputs_.size() == kSingleNum || inputs_.size() == kDoubleNum);
MS_ASSERT(outputs_.size() == kSingleNum);


+ 1
- 1
mindspore/lite/src/ops/unique.cc View File

@@ -65,7 +65,7 @@ int Unique::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outp
output1->set_format(input->format());
output0->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
output0->set_shape(input->shape());
output1->set_shape(input->shape());


+ 1
- 1
mindspore/lite/src/ops/unsqueeze.cc View File

@@ -78,7 +78,7 @@ int Unsqueeze::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> o
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}

auto dims = GetAxis();


+ 1
- 1
mindspore/lite/src/ops/unstack.cc View File

@@ -66,7 +66,7 @@ int Unstack::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outp
out->set_format(input->format());
}
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
std::vector<int> output_shape;
for (size_t i = 0; i < input_shape.size(); ++i) {


+ 1
- 1
mindspore/lite/src/ops/where.cc View File

@@ -81,7 +81,7 @@ int Where::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outpu
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
int num = input0->ElementsNum();
int num1 = input1->ElementsNum();


+ 1
- 1
mindspore/lite/src/ops/zeros_like.cc View File

@@ -56,7 +56,7 @@ int ZerosLike::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite:
output->set_data_type(input->data_type());
output->set_format(input->format());
if (!infer_flag()) {
return RET_OK;
return RET_INFER_INVALID;
}
output->set_shape(input->shape());
return RET_OK;


Loading…
Cancel
Save