From: @xu_anyue
Reviewed-by: @jpc_chenjianping
Signed-off-by: @jpc_chenjianping
pull/15901/MERGE
@@ -33,7 +33,7 @@ int AddSubGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
   TensorC *dx1 = outputs[0];
   TensorC *dx2 = outputs[1];
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -32,7 +32,7 @@ int AddnInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
     return NNACL_ERR;
   }
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -49,7 +49,7 @@ int ArgMinMaxInferShape(const TensorC *const *inputs, const size_t inputs_size,
   if (output_2 != NULL) {
     SetDataTypeFormat(output_2, input);
   }
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int output_shape[MAX_SHAPE_SIZE] = {0};
@@ -83,7 +83,7 @@ int ArithmeticInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
   size_t input_shape1_size = input1->shape_size_;
   SetOutputDtypeFormat(input0, input1, output);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input_shape0_size >= MAX_SHAPE_SIZE || input_shape1_size >= MAX_SHAPE_SIZE) {
@@ -50,7 +50,7 @@ int AudioSpectrogramInferShape(const TensorC *const *inputs, size_t inputs_size,
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input->shape_size_ != 2) {
@@ -115,7 +115,7 @@ int BatchToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten
     return NNACL_ERR;
   }
   SetDataTypeFormat(outputs[0], input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -28,7 +28,7 @@ int BroadcastToInferShape(const TensorC *const *inputs, size_t inputs_size, Tens
   const TensorC *input = inputs[0];
   SetDataTypeFormat(outputs[0], input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   BroadcastToParameter *param = (BroadcastToParameter *)parameter;
@@ -31,7 +31,7 @@ int CastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   output->format_ = input->format_;
   const TensorC *dst_type = inputs[1];
   output->data_type_ = *((int *)dst_type->data_);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input->data_type_ != kNumberTypeBool && input->data_type_ != kNumberTypeUInt8 &&
@@ -343,7 +343,7 @@ int CommonInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
     return NNACL_NULL_PTR;
   }
   SetDataTypeFormat(outputs[0], inputs[0]);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   SetShapeTensor(outputs[0], inputs[0]);
@@ -356,7 +356,7 @@ int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou
   TensorC *output = outputs[0];
   output->data_type_ = kNumberTypeFloat32;
   output->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int input_shape[MAX_SHAPE_SIZE] = {0};
@@ -454,6 +454,30 @@ void VectorCFree(VectorC *vc) {
   vc->data_ = NULL;
 }
+
+bool InferFlag(const TensorC *const *inputs, size_t inputs_size) {
+  if (inputs == NULL) {
+    return false;
+  }
+  for (size_t i = 0; i < inputs_size; i++) {
+    if (inputs[i] == NULL) {
+      return false;
+    }
+    if (inputs[i]->data_type_ == kObjectTypeTensorType) {
+      TensorListC *input_tensor_list = (TensorListC *)inputs[i];
+      if (input_tensor_list->shape_value_ == -1) {
+        return false;
+      }
+    } else {
+      for (size_t j = 0; j < inputs[i]->shape_size_; ++j) {
+        if (inputs[i]->shape_[j] == -1) {
+          return false;
+        }
+      }
+    }
+  }
+  return true;
+}
 REG_INFER(Abs, PrimType_Abs, CommonInferShape)
 REG_INFER(AbsGrad, PrimType_AbsGrad, CommonInferShape)
 REG_INFER(Activation, PrimType_Activation, CommonInferShape)
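Note: the new InferFlag() above encodes the convention that a dimension of -1 (or shape_value_ == -1 for a tensor list) means "shape not inferred yet". A minimal, self-contained sketch of that convention, using stand-in type names rather than the real TensorC/TensorListC:

// Illustrative sketch only (hypothetical FakeTensor, not the NNACL definitions).
#include <cstddef>

struct FakeTensor {
  int shape[8];
  size_t shape_size;
};

// True only when every dimension of every input is known, i.e. no -1
// placeholder is left; otherwise shape inference is deferred to runtime.
bool CanInferNow(const FakeTensor *const *inputs, size_t inputs_size) {
  if (inputs == nullptr) return false;
  for (size_t i = 0; i < inputs_size; ++i) {
    if (inputs[i] == nullptr) return false;
    for (size_t j = 0; j < inputs[i]->shape_size; ++j) {
      if (inputs[i]->shape[j] == -1) return false;
    }
  }
  return true;
}

int main() {
  FakeTensor known = {{1, 3, 224, 224}, 4};
  FakeTensor pending = {{1, -1, 224, 224}, 4};  // second dim still unknown
  const FakeTensor *ready[] = {&known};
  const FakeTensor *deferred[] = {&known, &pending};
  return (CanInferNow(ready, 1) && !CanInferNow(deferred, 2)) ? 0 : 1;
}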
@@ -139,7 +139,7 @@ typedef struct TensorListC {
   bool is_ready_;
   int data_type_;
   int format_;
+  int shape_value_;
   int tensors_data_type_;  // element_data_type_, keep same as c++
   int max_elements_num_;
   int element_shape_[8];
@@ -204,6 +204,7 @@ int VectorCInsert(VectorC *vc, int index, int value);
 void VectorCErase(VectorC *vc, int index);
 bool VectorCEqual(VectorC *vc1, VectorC *vc2);
 void VectorCFree(VectorC *vc);
+bool InferFlag(const TensorC *const *inputs, size_t inputs_size);
 #ifdef __cplusplus
 }
@@ -29,7 +29,7 @@ int ConcatInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
   const TensorC *input0 = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input0);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -31,7 +31,7 @@ int ConstantOfShapeInferShape(const TensorC *const *inputs, size_t inputs_size,
   ConstantOfShapeParameter *param = (ConstantOfShapeParameter *)parameter;
   out_tensor->data_type_ = (TypeIdC)(param->data_type_);
   out_tensor->format_ = in_tensor->format_;
-  if (!parameter->infer_flag_ || in_tensor->data_ == NULL) {
+  if (!InferFlag(inputs, inputs_size) || in_tensor->data_ == NULL) {
     return NNACL_INFER_INVALID;
   }
   int size = GetElementNum(in_tensor);
@@ -69,7 +69,7 @@ int Conv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
     param->group_ = weight_tensor->shape_[0];
   }
   param->output_channel_ = weight_tensor->shape_[0];
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   const int *in_shape = input_tensor->shape_;
@@ -27,16 +27,14 @@ int CropAndResizeInferShape(const TensorC *const *inputs, size_t inputs_size, Te
 #endif
   const TensorC *input = inputs[0];
-  if (input->shape_size_ != 0 && input->shape_size_ != 4) {
-    return NNACL_ERR;
-  }
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
+  if (input->shape_size_ != 0 && input->shape_size_ != 4) {
+    return NNACL_ERR;
+  }
   int output_shape[MAX_SHAPE_SIZE] = {0};
   size_t output_shape_size = 0;
   if (inputs[1]->data_ != NULL) {
@@ -27,7 +27,7 @@ int CropInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
 #endif
   SetDataTypeFormat(outputs[0], inputs[0]);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   SetShapeTensor(outputs[0], inputs[1]);
@@ -29,7 +29,7 @@ int CumsumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -36,7 +36,7 @@ int Deconv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   if (param->group_ == 0) {
     param->group_ = weight->shape_[0];
   }
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int32_t input_h = GetHeight(input);
@@ -28,7 +28,7 @@ int DeDepthwiseConv2DInferShape(const TensorC *const *inputs, size_t inputs_size
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int input_h = input->shape_[1];
@@ -32,7 +32,7 @@ int DepthToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten
   }
   SetDataTypeFormat(outputs[0], input);
   DepthToSpaceParameter *param = (DepthToSpaceParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int input_shape[MAX_SHAPE_SIZE] = {0};
@@ -30,7 +30,7 @@ int DepthwiseConv2dInferShape(const TensorC *const *inputs, size_t inputs_size,
   SetDataTypeFormat(output, input);
   ConvParameter *param = (ConvParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int input_h = input->shape_[1];
@@ -57,7 +57,7 @@ int DetectionPostProcessInferShape(const TensorC *const *inputs, size_t inputs_s
   detected_scores->data_type_ = kNumberTypeFloat32;
   num_det->format_ = boxes->format_;
   num_det->data_type_ = kNumberTypeFloat32;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   const int max_detections = param->max_detections_;
@@ -29,7 +29,7 @@ int DropoutGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tens
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   SetShapeTensor(output, input);
@@ -29,7 +29,7 @@ int DropoutInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   const TensorC *input = inputs[0];
   TensorC *output0 = outputs[0];
   SetDataTypeFormat(output0, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   SetShapeTensor(output0, input);
@@ -33,7 +33,7 @@ int EmbeddingLookupInferShape(const TensorC *const *inputs, size_t inputs_size,
   const TensorC *ids = inputs[inputs_size - 1];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, params_);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -29,7 +29,7 @@ int ExpandDimsInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -35,7 +35,7 @@ int FillInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   for (size_t i = 0; i < dst_shape_tensor->shape_size_; ++i) {
     num_dims *= dst_shape_tensor->shape_[i];
   }
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (num_dims != 0 && dst_shape == NULL) {
@@ -30,7 +30,7 @@ int FlattenGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tens
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -30,7 +30,7 @@ int FlattenInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -31,7 +31,7 @@ int FullConnectionInferShape(const TensorC *const *inputs, size_t inputs_size, T
   TensorC *output = outputs[0];
   MatMulParameter *param = (MatMulParameter *)parameter;
   SetDataTypeFormat(output, input0);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if ((param->has_bias_ && inputs_size != 3) || (!param->has_bias_ && inputs_size != 2)) {
@@ -38,7 +38,7 @@ int FusedBatchNormInferShape(const TensorC *const *inputs, size_t inputs_size, T
     outputs[5]->shape_size_ = 1;
     outputs[5]->shape_[0] = 1;
   }
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   return NNACL_OK;
@@ -30,7 +30,7 @@ int GatherInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
     output->data_type_ = kNumberTypeFloat32;
   }
   output->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int axis = *((int *)inputs[2]->data_);
@@ -31,7 +31,7 @@ int GatherNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int in_rank = input->shape_size_;
@@ -34,7 +34,7 @@ int GruInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou
   for (int i = 0; i < 2; i++) {
     SetDataTypeFormat(outputs[i], input);
   }
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -29,7 +29,7 @@ int InvertPermutationInferShape(const TensorC *const *inputs, size_t inputs_size
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input->data_type_ != kNumberTypeInt32) {
@@ -34,7 +34,7 @@ int LayerNormInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor
   SetDataTypeFormat(output, input);
   LayerNormParameter *param = (LayerNormParameter *)parameter;
-  if (!param->op_parameter_.infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   param->begin_norm_axis_ =
@@ -31,7 +31,7 @@ int LinSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   output->data_type_ = input->data_type_;
   output->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int *num = (int *)(inputs[2]->data_);
@@ -31,7 +31,7 @@ int LogSoftmaxInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
   output->data_type_ = input->data_type_;
   output->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input->shape_size_ > 5) {
@@ -34,7 +34,7 @@ int LstmInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   }
   LstmParameter *param = (LstmParameter *)parameter;
-  if (!param->op_parameter_.infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -32,7 +32,7 @@ int MatmulInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
   SetDataTypeFormat(output, input0);
   MatMulParameter *param = (MatMulParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -33,7 +33,7 @@ int MaxMinGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
   TensorC *dx1 = outputs[0];
   TensorC *dx2 = outputs[1];
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -28,7 +28,7 @@ int MeanInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   ReduceParameter *param = (ReduceParameter *)parameter;
@@ -71,11 +71,6 @@ int MergeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   }
 #endif
-  if (!parameter->infer_flag_) {
-    MergeDataTypeInfer((struct TensorC **)inputs, inputs_size, outputs, outputs_size);
-    return NNACL_INFER_INVALID;
-  }
   const TensorC *const *left_part_inputs = inputs;
   size_t left_part_inputs_size = inputs_size / 2;
@@ -90,6 +85,7 @@ int MergeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
     return MergeInfer((TensorC **)right_part_inputs, right_part_inputs_size, outputs, outputs_size);
   }
+  MergeDataTypeInfer((struct TensorC **)inputs, inputs_size, outputs, outputs_size);
   return NNACL_INFER_INVALID;
 }
@@ -29,7 +29,7 @@ int MfccInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input->shape_size_ != 3) {
@@ -38,7 +38,7 @@ int OneHotInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
     return NNACL_NULL_PTR;
   }
   SetDataTypeFormat(output, on_value);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   OneHotParameter *param = (OneHotParameter *)parameter;
@@ -30,7 +30,7 @@ int PadInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
   PadParameter *param = (PadParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -31,7 +31,7 @@ int PoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
   PoolingParameter *param = (PoolingParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int input_h = input->shape_[1];
@@ -40,7 +40,7 @@ int PowerInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   TensorC *output_tensor = outputs[0];
   SetDataTypeFormat(output_tensor, x_tensor);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (exp_tensor != NULL) {
@@ -31,7 +31,7 @@ int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   TensorC *output = outputs[0];
   output->data_type_ = kNumberTypeFloat32;
   output->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   float different_aspect_ratios[MAX_SHAPE_SIZE * 2 + 1];  // NOTE: flip double the number
@@ -32,7 +32,7 @@ int QuantDtypeCastInferShape(const TensorC *const *inputs, size_t inputs_size, T
   QuantDtypeCastParameter *param = (QuantDtypeCastParameter *)parameter;
   output->data_type_ = param->dstT_;
   output->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   SetShapeTensor(output, input);
@@ -27,7 +27,7 @@ int RandomStandardNormalInferShape(const TensorC *const *inputs, size_t inputs_s
 #endif
   outputs[0]->data_type_ = kNumberTypeFloat32;
   outputs[0]->format_ = inputs[0]->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -39,7 +39,7 @@ int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
     output->data_type_ = kNumberTypeInt32;
   }
   output->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -29,7 +29,7 @@ int RankInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   output->shape_size_ = 1;
@@ -63,7 +63,7 @@ int ReduceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
   ReduceParameter *param = (ReduceParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   bool keep_dims = param->keep_dims_;
@@ -142,7 +142,7 @@ int ReshapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
   ReshapeParameter *param = (ReshapeParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -32,7 +32,7 @@ int ResizeGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
   }
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   const TensorC *input_1 = inputs[1];
@@ -116,17 +116,15 @@ int ResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
 #endif
   const TensorC *input = inputs[0];
-  if (input->shape_size_ != 0 && input->shape_size_ != 4) {
-    return NNACL_ERR;
-  }
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  ResizeParameter *param = (ResizeParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
+  if (input->shape_size_ != 0 && input->shape_size_ != 4) {
+    return NNACL_ERR;
+  }
+  ResizeParameter *param = (ResizeParameter *)parameter;
   int output_shape[MAX_SHAPE_SIZE] = {0};
   size_t output_shape_size = 0;
   ShapePush(output_shape, &output_shape_size, GetBatch(input));
@@ -30,7 +30,7 @@ int RfftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   TensorC *output = outputs[0];
   output->data_type_ = kNumberTypeComplex64;
   output->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   ShapeSet(output->shape_, &(output->shape_size_), input->shape_, input->shape_size_);
@@ -30,7 +30,7 @@ int ROIPoolingInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
   const TensorC *roi = inputs[1];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -34,7 +34,7 @@ int ScatterNdInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, update);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int *shape_data = (int *)(shape->data_);
@@ -28,7 +28,7 @@ int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
   }
 #endif
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   for (size_t i = 0; i < outputs_size; i++) {
@@ -31,7 +31,7 @@ int ShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   out_tensor->data_type_ = kNumberTypeInt32;
   out_tensor->format_ = in_tensor->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   out_tensor->shape_size_ = 1;
@@ -30,7 +30,7 @@ int SizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   TensorC *out_tensor = outputs[0];
   out_tensor->data_type_ = kNumberTypeInt32;
   out_tensor->format_ = in_tensor->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -26,7 +26,7 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -31,7 +31,7 @@ int SoftMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   output->data_type_ = input->data_type_;
   output->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input->shape_size_ > 5) {
@@ -32,7 +32,7 @@ int SpaceToBatchInferShape(const TensorC *const *inputs, size_t inputs_size, Ten
   }
   SetDataTypeFormat(outputs[0], input);
   SpaceToBatchParameter *param = (SpaceToBatchParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input->shape_size_ != 4) {
@@ -110,7 +110,7 @@ int SpaceToBatchNdInferShape(const TensorC *const *inputs, size_t inputs_size, T
   }
   outputs[0]->data_type_ = input->data_type_;
   outputs[0]->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -33,7 +33,7 @@ int SpaceToDepthInferShape(const TensorC *const *inputs, size_t inputs_size, Ten
   }
   SetDataTypeFormat(outputs[0], input);
   SpaceToDepthParameter *param = (SpaceToDepthParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input->shape_size_ != 4) {
@@ -30,7 +30,7 @@ int SparseToDenseInferShape(const TensorC *const *inputs, size_t inputs_size, Te
   const TensorC *input1 = inputs[1];
   const TensorC *input2 = inputs[2];
   SetDataTypeFormat(output, input2);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int *input1_data = (int *)(input1->data_);
@@ -29,7 +29,7 @@ int SpliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -41,7 +41,7 @@ int SplitInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   size_t num_split_ = param->num_split_ == 0 ? (int)(outputs_size) : param->num_split_;
   param->num_split_ = num_split_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -25,7 +25,7 @@ int SplitWithOverlapInferShape(const TensorC *const *inputs, size_t inputs_size,
     return check_ret;
   }
 #endif
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   const TensorC *input = inputs[0];
@@ -29,7 +29,7 @@ int SqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   const TensorC *input = inputs[0];
   SqueezeParameter *param = (SqueezeParameter *)parameter;
   SetDataTypeFormat(outputs[0], input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int out_shape[MAX_SHAPE_SIZE] = {0};
@@ -28,7 +28,7 @@ int StackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   const TensorC *input = inputs[0];
   SetDataTypeFormat(outputs[0], input);
   StackParameter *param = (StackParameter *)parameter;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int32_t output_shape[MAX_SHAPE_SIZE] = {0};
@@ -37,7 +37,7 @@ int StridedSliceGradInferShape(const TensorC *const *inputs, size_t inputs_size,
   const TensorC *input = inputs[0];
   SetDataTypeFormat(outputs[0], input);
-  bool inferflag = parameter->infer_flag_;
+  bool inferflag = InferFlag(inputs, inputs_size);
   int in_shape_[MAX_SHAPE_SIZE] = {0};
   size_t in_shape_size = 0;
@@ -313,7 +313,7 @@ int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten
   const TensorC *input = inputs[0];
   SetDataTypeFormat(outputs[0], inputs[0]);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -57,9 +57,12 @@ int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
       memcpy(mirror_tensor, inputs[i + 1], sizeof(TensorC));
       outputs[i + outputs_size / 2] = mirror_tensor;
     }
+  }
+  bool infer_flag = InferFlag(inputs, inputs_size);
+  for (size_t i = 0; i < outputs_size / 2; i++) {
     *((const TensorC **)inputs + i + 1) = NULL;
   }
-  if (!parameter->infer_flag_) {
+  if (!infer_flag) {
     return NNACL_INFER_INVALID;
   }
   return NNACL_OK;
@@ -32,7 +32,7 @@ int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_s
   output->format_ = Format_NHWC;
   output->tensors_data_type_ = input0->data_type_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -28,14 +28,14 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size
   TensorListC *input0 = (TensorListC *)(inputs[0]);
   const TensorC *get_index = inputs[1];
-  if (get_index->data_ == NULL) {
-    return NNACL_INFER_INVALID;
-  }
   if (GetElementNum(get_index) != 1) {
     return NNACL_ERR;
   }
   TensorC *output = outputs[0];
-  if (!parameter->infer_flag_ || input0->element_num_ == 0) {
+  if (!InferFlag(inputs, inputs_size) || input0->element_num_ == 0) {
     return NNACL_INFER_INVALID;
   }
+  if (get_index->data_ == NULL) {
+    return NNACL_INFER_INVALID;
+  }
   int index = ((int *)(get_index->data_))[0];
@@ -51,7 +51,7 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size
   }
   output->format_ = input0->tensors_[index].format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -49,12 +49,12 @@ int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size
   if (num_ele_type != kNumberTypeInt && ele_shape_type != kNumberTypeInt32) {
     return NNACL_ERR;
   }
-  if (GetElementNum(input1) != 1) {
-    return NNACL_ERR;
-  }
   if (input1->data_ == NULL) {
     return NNACL_INFER_INVALID;
   }
+  if (GetElementNum(input1) != 1) {
+    return NNACL_ERR;
+  }
   int num_elements = ((int *)(input1->data_))[0];
   ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, GetElementNum(input0));
   output->element_num_ = num_elements;
@@ -51,7 +51,7 @@ int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size
   output0->format_ = input0->format_;
   output0->tensors_data_type_ = value_tensor->data_type_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -29,7 +29,7 @@ int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size,
   TensorListC *input0 = (TensorListC *)(inputs[0]);
   output->data_type_ = input0->tensors_data_type_;
   output->format_ = input0->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   if (input0->element_num_ == 0) {
@@ -46,7 +46,7 @@ int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -35,7 +35,7 @@ int TopKInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   SetDataTypeFormat(output0, input);
   output1->data_type_ = kNumberTypeInt32;
   output1->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   const TensorC *input_k_tensor = inputs[1];
@@ -42,7 +42,7 @@ int TransposeInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor
   if (parameter->quant_type_ == QuantType_QUANT_WEIGHT) {
     output->data_type_ = kNumberTypeFloat32;
   }
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -21,7 +21,7 @@ int UniformRealInferShape(const TensorC *const *inputs, size_t inputs_size, Tens
                           OpParameter *parameter) {
   outputs[0]->data_type_ = kNumberTypeFloat32;
   outputs[0]->format_ = inputs[0]->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int32_t *input_data = (int32_t *)(inputs[0]->data_);
@@ -33,7 +33,7 @@ int UniqueInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
   SetDataTypeFormat(output0, input);
   output1->data_type_ = kNumberTypeInt32;
   output1->format_ = input->format_;
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   SetShapeTensor(output0, input);
@@ -30,7 +30,7 @@ int UnsqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor
   TensorC *output = outputs[0];
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -36,7 +36,7 @@ int UnstackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
     SetDataTypeFormat(outputs[i], input);
   }
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
   int output_shape[MAX_SHAPE_SIZE] = {0};
@@ -41,7 +41,7 @@ int WhereInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   }
   SetDataTypeFormat(output, input);
-  if (!parameter->infer_flag_) {
+  if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }
@@ -81,7 +81,6 @@ typedef enum DataOrder {
 typedef struct OpParameter {
   char name_[100];
-  bool infer_flag_;
   int type_;
   int thread_num_;
   int quant_type_;
@@ -33,8 +33,7 @@ std::ostream &operator<<(std::ostream &code, const ::QuantArg &quant_arg) {
 std::ostream &operator<<(std::ostream &code, const OpParameter &parameter) {
   code << "{ \"\""
-       << ", " << std::boolalpha << parameter.infer_flag_ << ", " << parameter.type_ << ", " << gThreadNum << ", "
-       << parameter.quant_type_ << "}";
+       << ", " << parameter.type_ << ", " << gThreadNum << ", " << parameter.quant_type_ << "}";
   return code;
 }
@@ -209,12 +209,10 @@ OpParameter *CoderSession::GenParameterAndInfer(const Model::Node *node, const s
   MS_CHECK_PTR_RET_NULL(parame_gen);
   auto parameter = parame_gen(primitive);
   MS_CHECK_PTR_RET_NULL(parameter);
-  parameter->infer_flag_ = true;
   auto ret = KernelInferShape(inputs, outputs, parameter);
   if (ret == RET_INFER_INVALID) {
     MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << node->name_
                  << ", type: " << PrimitiveTypeName(GetPrimitiveType(primitive)) << "flag set to false.";
-    parameter->infer_flag_ = false;
   } else if (ret != RET_OK) {
     MS_LOG(ERROR) << "InferShape failed, name: " << node->name_
                   << ", type: " << PrimitiveTypeName(GetPrimitiveType(primitive));
@@ -108,6 +108,7 @@ int TensorList2TensorListC(TensorList *src, TensorListC *dst) {
   dst->is_ready_ = src->IsReady();
   dst->data_type_ = static_cast<TypeIdC>(src->data_type());
   dst->format_ = src->format();
+  dst->shape_value_ = src->shape().empty() ? 0 : src->shape().front();
   dst->element_num_ = src->shape().empty() ? 0 : src->tensors().size();
   dst->tensors_ = reinterpret_cast<TensorC *>(malloc(dst->element_num_ * sizeof(TensorC)));
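Note: the shape_value_ field added above carries the TensorList's first shape dimension into the C struct, and a -1 there is what InferFlag() treats as "tensor list not inferred yet". A small illustrative sketch of that propagation (hypothetical FakeTensorListC, only the relevant field):

#include <vector>

// Stand-in for the relevant part of TensorListC.
struct FakeTensorListC {
  int shape_value_;
};

// Mirrors the conversion added above: take the first dim of the TensorList's
// shape, or 0 when the shape is empty.
FakeTensorListC Convert(const std::vector<int> &tensor_list_shape) {
  FakeTensorListC dst{};
  dst.shape_value_ = tensor_list_shape.empty() ? 0 : tensor_list_shape.front();
  return dst;
}

int main() {
  FakeTensorListC pending = Convert({-1});  // element count unknown -> defer inference
  FakeTensorListC ready = Convert({4});     // four elements, inference can run
  return (pending.shape_value_ == -1 && ready.shape_value_ == 4) ? 0 : 1;
}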
@@ -88,10 +88,8 @@ int LiteKernel::FreeInWorkTensor() const {
 int LiteKernel::PreProcess() {
   if (!InferShapeDone()) {
-    op_parameter_->infer_flag_ = true;
     auto ret = lite::KernelInferShape(in_tensors_, &out_tensors_, op_parameter_);
     if (ret != 0) {
-      op_parameter_->infer_flag_ = false;
       MS_LOG(ERROR) << "InferShape fail!";
       return ret;
     }
@@ -188,14 +188,15 @@ class LiteKernel {
   int DecOutTensorRefCount();
 #endif
- protected:
-  bool InferShapeDone() {
-    if (op_parameter_ != nullptr) {
-      return op_parameter_->infer_flag_;
+  bool InferShapeDone() const {
+    auto shape = out_tensors_.front()->shape();
+    if (std::find(shape.begin(), shape.end(), -1) != shape.end()) {
+      return false;
     }
-    return false;
+    return true;
   }
+ protected:
   KernelKey desc_{};
   std::string name_;
   OpParameter *op_parameter_ = nullptr;
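Note: with infer_flag_ gone, "has shape inference finished?" is now answered from the output tensors themselves. A short sketch of that check, using a plain std::vector in place of the real Tensor class:

#include <algorithm>
#include <vector>

// Mirrors the new InferShapeDone(): inference counts as done when the first
// output's shape contains no -1 placeholder.
bool InferShapeDoneSketch(const std::vector<int> &first_output_shape) {
  return std::find(first_output_shape.begin(), first_output_shape.end(), -1) ==
         first_output_shape.end();
}

int main() {
  bool pending = InferShapeDoneSketch({-1});        // deferred by KernelInferShape
  bool done = InferShapeDoneSketch({1, 16, 8, 8});  // fully known shape
  return (!pending && done) ? 0 : 1;
}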
@@ -690,7 +690,6 @@ void LiteSession::ResetInputsShape(const std::vector<std::vector<int>> &dims) {
 }
 int LiteSession::ReSizeKernels(const std::vector<kernel::LiteKernel *> &kernels) {
-  bool infer_shape_interrupt = false;
   for (auto kernel : kernels) {
     if (kernel == nullptr) {
       MS_LOG(ERROR) << "input kernel is nullptr!";
@@ -708,11 +707,10 @@ int LiteSession::ReSizeKernels(const std::vector<kernel::LiteKernel *> &kernels)
 #endif
     } else {
       auto sub_graph = reinterpret_cast<kernel::SubGraphKernel *>(kernel);
-      ret = sub_graph->ReSize(infer_shape_interrupt);
+      ret = sub_graph->ReSize();
     }
     if (ret == RET_INFER_INVALID) {
       MS_LOG(INFO) << "InferShape is interrupted";
-      infer_shape_interrupt = true;
       continue;
     }
     if (ret != RET_OK) {
@@ -66,6 +66,9 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, std::vector<lite
     } else {
       TensorC2Tensor(out_tensors.at(i), outputs->at(i));
     }
+    if (ret == NNACL_INFER_INVALID) {
+      outputs->at(i)->set_shape({-1});
+    }
   }
   FreeAllTensorC(&in_tensors);
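Note: this is the producer side of the same convention: when the NNACL infer function returns NNACL_INFER_INVALID, the wrapper stamps {-1} onto the output so the InferShapeDone()-style check above reports "not done". A hedged sketch (the constants are stand-ins, not the real NNACL error codes):

#include <vector>

constexpr int kInferInvalid = -4;  // stand-in for NNACL_INFER_INVALID
constexpr int kOk = 0;             // stand-in for NNACL_OK

// Copy the inferred shape on success; on "cannot infer yet", write the -1
// placeholder so downstream checks treat the kernel as pending.
void ApplyInferResult(int ret, const std::vector<int> &inferred_shape,
                      std::vector<int> *output_shape) {
  if (ret == kInferInvalid) {
    *output_shape = {-1};
  } else if (ret == kOk) {
    *output_shape = inferred_shape;
  }
}

int main() {
  std::vector<int> shape;
  ApplyInferResult(kInferInvalid, {}, &shape);
  bool deferred = (shape == std::vector<int>{-1});
  ApplyInferResult(kOk, {1, 8}, &shape);
  bool done = (shape == std::vector<int>{1, 8});
  return (deferred && done) ? 0 : 1;
}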
@@ -75,11 +75,8 @@ void GroupConvolutionBaseCPUKernel::FreeSubKernel() {
 int GroupConvolutionBaseCPUKernel::PreProcess() {
   if (!InferShapeDone()) {
-    op_parameter_->infer_flag_ = true;
     auto ret = lite::KernelInferShape(in_tensors_, &out_tensors_, op_parameter_);
     if (ret != 0) {
-      op_parameter_->infer_flag_ = false;
       MS_LOG(ERROR) << "InferShape fail!";
       return ret;
     }
@@ -37,9 +37,10 @@ class GroupConvCreator {
                    const lite::InnerContext *ctx, bool is_quant, TypeId data_type)
       : origin_inputs_(std::move(inputs)),
         origin_outputs_(std::move(outputs)),
-        infered_(op_parameter->infer_flag_),
         is_quant_(is_quant),
         data_type_(data_type) {
+    auto shape = origin_outputs_.front()->shape();
+    infered_ = std::find(shape.begin(), shape.end(), -1) == shape.end();
     conv_param_ = reinterpret_cast<ConvParameter *>(op_parameter);
   }
@@ -108,7 +108,7 @@ int ResizeBaseCPUKernel::Init() {
   auto input = in_tensors_.at(0);
   auto input_shape = input->shape();
-  if (!input_shape.empty() && input_shape.size() != COMM_SHAPE_SIZE) {
+  if (InferShapeDone() && input_shape.size() != COMM_SHAPE_SIZE) {
     MS_LOG(ERROR) << "Resize op support input rank 4, got " << input_shape.size();
     return RET_ERROR;
   }