From 1e7f56f3ab10b13d340d9c5101628fec5958df29 Mon Sep 17 00:00:00 2001 From: chenjianping Date: Mon, 15 Mar 2021 10:39:55 +0800 Subject: [PATCH] support crop infershape --- mindspore/lite/micro/cmake/file_list.cmake | 1 + mindspore/lite/nnacl/infer/adam_infer.c | 3 + .../lite/nnacl/infer/add_sub_grad_infer.c | 5 + mindspore/lite/nnacl/infer/addn_infer.c | 3 + .../lite/nnacl/infer/apply_momentum_infer.c | 3 + mindspore/lite/nnacl/infer/argmin_max_infer.c | 4 + .../nnacl/infer/arithmetic_compare_infer.c | 8 + .../lite/nnacl/infer/arithmetic_grad_infer.c | 4 + mindspore/lite/nnacl/infer/arithmetic_infer.c | 16 + mindspore/lite/nnacl/infer/assert_op_infer.c | 3 + mindspore/lite/nnacl/infer/assign_add_infer.c | 3 + mindspore/lite/nnacl/infer/assign_infer.c | 3 + .../nnacl/infer/audio_spectrogram_infer.c | 3 + .../lite/nnacl/infer/batch_to_space_infer.c | 3 + mindspore/lite/nnacl/infer/bias_grad_infer.c | 3 + .../nnacl/infer/binary_cross_entropy_infer.c | 3 + mindspore/lite/nnacl/infer/bn_grad_infer.c | 3 + .../lite/nnacl/infer/broadcast_to_infer.c | 3 + mindspore/lite/nnacl/infer/cast_infer.c | 3 + mindspore/lite/nnacl/infer/common_infer.c | 46 +++ mindspore/lite/nnacl/infer/concat_infer.c | 3 + .../nnacl/infer/constant_of_shape_infer.c | 3 + .../nnacl/infer/conv2d_grad_filter_infer.c | 3 + .../nnacl/infer/conv2d_grad_input_infer.c | 3 + mindspore/lite/nnacl/infer/conv2d_infer.c | 4 + .../lite/nnacl/infer/crop_and_resize_infer.c | 3 + mindspore/lite/nnacl/infer/crop_infer.c | 3 + .../infer/custom_extract_features_infer.c | 3 + .../lite/nnacl/infer/custom_normalize_infer.c | 3 + .../lite/nnacl/infer/custom_predict_infer.c | 3 + mindspore/lite/nnacl/infer/deconv2d_infer.c | 3 + .../nnacl/infer/dedepthwise_conv2d_infer.c | 1 + .../lite/nnacl/infer/depth_to_space_infer.c | 3 + .../lite/nnacl/infer/depthwise_conv2d_infer.c | 1 + .../infer/detection_post_process_infer.c | 3 + .../lite/nnacl/infer/dropout_grad_infer.c | 3 + mindspore/lite/nnacl/infer/dropout_infer.c | 3 + .../lite/nnacl/infer/embedding_lookup_infer.c | 3 + .../lite/nnacl/infer/expand_dims_infer.c | 3 + mindspore/lite/nnacl/infer/fft_imag_infer.c | 3 + mindspore/lite/nnacl/infer/fft_real_infer.c | 3 + mindspore/lite/nnacl/infer/fill_infer.c | 3 + .../lite/nnacl/infer/flatten_grad_infer.c | 3 + mindspore/lite/nnacl/infer/flatten_infer.c | 3 + .../lite/nnacl/infer/full_connection_infer.c | 3 + .../lite/nnacl/infer/fused_batchnorm_infer.c | 3 + mindspore/lite/nnacl/infer/gather_infer.c | 3 + mindspore/lite/nnacl/infer/gather_nd_infer.c | 3 + .../infer/group_conv2d_grad_input_infer.c | 1 + mindspore/lite/nnacl/infer/gru_infer.c | 3 + .../lite/nnacl/infer/hashtable_lookup_infer.c | 3 + mindspore/lite/nnacl/infer/infer.h | 33 ++ mindspore/lite/nnacl/infer/infer_register.c | 31 ++ mindspore/lite/nnacl/infer/infer_register.h | 226 +++++++++++ .../nnacl/infer/invert_permutation_infer.c | 3 + .../lite/nnacl/infer/layer_norm_grad_infer.c | 3 + mindspore/lite/nnacl/infer/layer_norm_infer.c | 3 + mindspore/lite/nnacl/infer/lin_space_infer.c | 3 + .../lite/nnacl/infer/lsh_projection_infer.c | 3 + mindspore/lite/nnacl/infer/lstm_infer.c | 3 + mindspore/lite/nnacl/infer/matmul_infer.c | 3 + .../lite/nnacl/infer/maximum_grad_infer.c | 3 + mindspore/lite/nnacl/infer/mean_infer.c | 1 + mindspore/lite/nnacl/infer/merge_infer.c | 3 + mindspore/lite/nnacl/infer/mfcc_infer.c | 3 + .../nnacl/infer/non_max_suppression_infer.c | 3 + mindspore/lite/nnacl/infer/one_hot_infer.c | 3 + mindspore/lite/nnacl/infer/pad_infer.c | 3 + 
mindspore/lite/nnacl/infer/partial_infer.c | 3 + .../lite/nnacl/infer/pooling_grad_infer.c | 4 + mindspore/lite/nnacl/infer/pooling_infer.c | 4 + mindspore/lite/nnacl/infer/power_infer.c | 3 + mindspore/lite/nnacl/infer/prior_box_infer.c | 3 + .../lite/nnacl/infer/quant_dtype_cast_infer.c | 3 + .../infer/random_standard_normal_infer.c | 3 + mindspore/lite/nnacl/infer/range_infer.c | 3 + mindspore/lite/nnacl/infer/rank_infer.c | 3 + mindspore/lite/nnacl/infer/reduce_infer.c | 3 + mindspore/lite/nnacl/infer/reshape_infer.c | 3 + mindspore/lite/nnacl/infer/resize_infer.c | 3 + mindspore/lite/nnacl/infer/rfft_infer.c | 4 + .../lite/nnacl/infer/roi_pooling_infer.c | 3 + mindspore/lite/nnacl/infer/scatter_nd_infer.c | 3 + mindspore/lite/nnacl/infer/select_infer.c | 3 + mindspore/lite/nnacl/infer/sgd_infer.c | 3 + mindspore/lite/nnacl/infer/shape_infer.c | 3 + mindspore/lite/nnacl/infer/size_infer.c | 3 + mindspore/lite/nnacl/infer/skip_gram_infer.c | 3 + mindspore/lite/nnacl/infer/slice_infer.c | 3 + .../nnacl/infer/softmax_cross_entropy_infer.c | 3 + mindspore/lite/nnacl/infer/softmax_infer.c | 3 + .../lite/nnacl/infer/space_to_batch_infer.c | 3 + .../nnacl/infer/space_to_batch_nd_infer.c | 3 + .../lite/nnacl/infer/space_to_depth_infer.c | 3 + ..._softmax_cross_entropy_with_logits_infer.c | 4 + .../lite/nnacl/infer/sparse_to_dense_infer.c | 3 + mindspore/lite/nnacl/infer/splice_infer.c | 1 + mindspore/lite/nnacl/infer/split_infer.c | 3 + mindspore/lite/nnacl/infer/squeeze_infer.c | 3 + mindspore/lite/nnacl/infer/stack_infer.c | 3 + .../nnacl/infer/strided_slice_grad_infer.c | 3 + .../lite/nnacl/infer/strided_slice_infer.c | 3 + mindspore/lite/nnacl/infer/switch_infer.c | 3 + .../nnacl/infer/tensorlist_fromtensor_infer.c | 3 + .../nnacl/infer/tensorlist_getitem_infer.c | 3 + .../nnacl/infer/tensorlist_reserve_infer.c | 3 + .../nnacl/infer/tensorlist_setitem_infer.c | 3 + .../lite/nnacl/infer/tensorlist_stack_infer.c | 3 + mindspore/lite/nnacl/infer/tile_infer.c | 3 + mindspore/lite/nnacl/infer/topk_infer.c | 3 + mindspore/lite/nnacl/infer/transpose_infer.c | 3 + .../lite/nnacl/infer/uniform_real_infer.c | 3 + mindspore/lite/nnacl/infer/unique_infer.c | 3 + .../nnacl/infer/unsorted_segment_sum_infer.c | 3 + mindspore/lite/nnacl/infer/unsqueeze_infer.c | 3 + mindspore/lite/nnacl/infer/unstack_infer.c | 3 + mindspore/lite/nnacl/infer/where_infer.c | 3 + mindspore/lite/nnacl/infer/while_infer.c | 3 + mindspore/lite/schema/ops.fbs | 1 - mindspore/lite/src/CMakeLists.txt | 17 +- mindspore/lite/src/runtime/infer_manager.cc | 357 +----------------- mindspore/lite/src/runtime/infer_manager.h | 3 +- 122 files changed, 706 insertions(+), 365 deletions(-) create mode 100644 mindspore/lite/nnacl/infer/infer.h create mode 100644 mindspore/lite/nnacl/infer/infer_register.c create mode 100644 mindspore/lite/nnacl/infer/infer_register.h diff --git a/mindspore/lite/micro/cmake/file_list.cmake b/mindspore/lite/micro/cmake/file_list.cmake index 22b84b341a..db73d2b90a 100644 --- a/mindspore/lite/micro/cmake/file_list.cmake +++ b/mindspore/lite/micro/cmake/file_list.cmake @@ -242,6 +242,7 @@ set(LITE_KERNEL_SRC ${LITE_DIR}/nnacl/infer/group_conv2d_grad_input_infer.c ${LITE_DIR}/nnacl/infer/gru_infer.c ${LITE_DIR}/nnacl/infer/hashtable_lookup_infer.c + ${LITE_DIR}/nnacl/infer/infer_register.c ${LITE_DIR}/nnacl/infer/invert_permutation_infer.c ${LITE_DIR}/nnacl/infer/layer_norm_infer.c ${LITE_DIR}/nnacl/infer/layer_norm_grad_infer.c diff --git a/mindspore/lite/nnacl/infer/adam_infer.c 
b/mindspore/lite/nnacl/infer/adam_infer.c index 458c652a45..8dd80cf8d0 100644 --- a/mindspore/lite/nnacl/infer/adam_infer.c +++ b/mindspore/lite/nnacl/infer/adam_infer.c @@ -14,6 +14,7 @@ * limitations under the License. */ #include "nnacl/infer/adam_infer.h" +#include "nnacl/infer/infer_register.h" int AdamInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -39,3 +40,5 @@ int AdamInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o return NNACL_OK; } + +REG_INFER(Adam, PrimType_Adam, AdamInferShape) diff --git a/mindspore/lite/nnacl/infer/add_sub_grad_infer.c b/mindspore/lite/nnacl/infer/add_sub_grad_infer.c index c61e130147..2b0f5e549c 100644 --- a/mindspore/lite/nnacl/infer/add_sub_grad_infer.c +++ b/mindspore/lite/nnacl/infer/add_sub_grad_infer.c @@ -16,6 +16,8 @@ #include "nnacl/infer/add_sub_grad_infer.h" #include "nnacl/arithmetic.h" +#include "nnacl/infer/arithmetic_grad_infer.h" +#include "nnacl/infer/infer_register.h" int AddSubGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -58,3 +60,6 @@ int AddSubGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso SetDataTypeFormat(dx2, dy); return NNACL_OK; } + +REG_INFER(AddGrad, PrimType_AddGrad, AddSubGradInferShape) +REG_INFER(SubGrad, PrimType_SubGrad, AddSubGradInferShape) diff --git a/mindspore/lite/nnacl/infer/addn_infer.c b/mindspore/lite/nnacl/infer/addn_infer.c index ebdcba8828..07133d1892 100644 --- a/mindspore/lite/nnacl/infer/addn_infer.c +++ b/mindspore/lite/nnacl/infer/addn_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/addn_infer.h" +#include "nnacl/infer/infer_register.h" int AddnInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -71,3 +72,5 @@ int AddnInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o return NNACL_OK; } + +REG_INFER(AddN, PrimType_AddN, AddnInferShape) diff --git a/mindspore/lite/nnacl/infer/apply_momentum_infer.c b/mindspore/lite/nnacl/infer/apply_momentum_infer.c index 78dee0ad3b..12d3872a7d 100644 --- a/mindspore/lite/nnacl/infer/apply_momentum_infer.c +++ b/mindspore/lite/nnacl/infer/apply_momentum_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/apply_momentum_infer.h" +#include "nnacl/infer/infer_register.h" int ApplyMomentumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -42,3 +43,5 @@ int ApplyMomentumInferShape(const TensorC *const *inputs, size_t inputs_size, Te return NNACL_OK; } + +REG_INFER(ApplyMomentum, PrimType_ApplyMomentum, ApplyMomentumInferShape) diff --git a/mindspore/lite/nnacl/infer/argmin_max_infer.c b/mindspore/lite/nnacl/infer/argmin_max_infer.c index e263cfdeee..121cf4065a 100644 --- a/mindspore/lite/nnacl/infer/argmin_max_infer.c +++ b/mindspore/lite/nnacl/infer/argmin_max_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/argmin_max_infer.h" +#include "nnacl/infer/infer_register.h" int ArgMinMaxInferShape(const TensorC *const *inputs, const size_t inputs_size, TensorC **outputs, const size_t outputs_size, OpParameter *parameter) { @@ -73,3 +74,6 @@ int ArgMinMaxInferShape(const TensorC *const *inputs, const size_t inputs_size, } return NNACL_OK; } + +REG_INFER(ArgMin, PrimType_ArgMinFusion, ArgMinMaxInferShape) +REG_INFER(ArgMax, PrimType_ArgMaxFusion, ArgMinMaxInferShape) diff --git 
a/mindspore/lite/nnacl/infer/arithmetic_compare_infer.c b/mindspore/lite/nnacl/infer/arithmetic_compare_infer.c index 9d3c812285..655a158250 100644 --- a/mindspore/lite/nnacl/infer/arithmetic_compare_infer.c +++ b/mindspore/lite/nnacl/infer/arithmetic_compare_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/arithmetic_compare_infer.h" +#include "nnacl/infer/infer_register.h" int ArithmeticCompareInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -26,3 +27,10 @@ int ArithmeticCompareInferShape(const TensorC *const *inputs, size_t inputs_size output->data_type_ = kNumberTypeBool; return res; } + +REG_INFER(Equal, PrimType_Equal, ArithmeticCompareInferShape) +REG_INFER(Greater, PrimType_Greater, ArithmeticCompareInferShape) +REG_INFER(GreaterEqual, PrimType_GreaterEqual, ArithmeticCompareInferShape) +REG_INFER(Less, PrimType_Less, ArithmeticCompareInferShape) +REG_INFER(LessEqual, PrimType_LessEqual, ArithmeticCompareInferShape) +REG_INFER(NotEqual, PrimType_NotEqual, ArithmeticCompareInferShape) diff --git a/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c b/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c index 11adf06b6b..ed00572b17 100644 --- a/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c +++ b/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/arithmetic_grad_infer.h" #include "nnacl/arithmetic.h" +#include "nnacl/infer/infer_register.h" /* * the Arithmetic Grad op include AddGrad, SubGrad, MulGrad, DivGrad, MaximumGrad, MinimumGrad @@ -99,3 +100,6 @@ int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, T dx2->data_type_ = dy->data_type_; return NNACL_OK; } + +REG_INFER(DivGrad, PrimType_DivGrad, ArithmeticGradInferShape) +REG_INFER(MulGrad, PrimType_MulGrad, ArithmeticGradInferShape) diff --git a/mindspore/lite/nnacl/infer/arithmetic_infer.c b/mindspore/lite/nnacl/infer/arithmetic_infer.c index 216c35a6e7..9e4f4541d9 100644 --- a/mindspore/lite/nnacl/infer/arithmetic_infer.c +++ b/mindspore/lite/nnacl/infer/arithmetic_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/arithmetic_infer.h" +#include "nnacl/infer/infer_register.h" int ArithmeticInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -117,3 +118,18 @@ int ArithmeticInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso } return NNACL_OK; } + +REG_INFER(Add, PrimType_AddFusion, ArithmeticInferShape) +REG_INFER(Div, PrimType_DivFusion, ArithmeticInferShape) +REG_INFER(Eltwise, PrimType_Eltwise, ArithmeticInferShape) +REG_INFER(FloorDiv, PrimType_FloorDiv, ArithmeticInferShape) +REG_INFER(FloorMod, PrimType_FloorMod, ArithmeticInferShape) +REG_INFER(LogicalAnd, PrimType_LogicalAnd, ArithmeticInferShape) +REG_INFER(LogicalOr, PrimType_LogicalOr, ArithmeticInferShape) +REG_INFER(Maximum, PrimType_Maximum, ArithmeticInferShape) +REG_INFER(Minimum, PrimType_Minimum, ArithmeticInferShape) +REG_INFER(Mod, PrimType_Mod, ArithmeticInferShape) +REG_INFER(Mul, PrimType_MulFusion, ArithmeticInferShape) +REG_INFER(RealDiv, PrimType_RealDiv, ArithmeticInferShape) +REG_INFER(Sub, PrimType_SubFusion, ArithmeticInferShape) +REG_INFER(SquaredDifference, PrimType_SquaredDifference, ArithmeticInferShape) diff --git a/mindspore/lite/nnacl/infer/assert_op_infer.c b/mindspore/lite/nnacl/infer/assert_op_infer.c index 5fabc13637..35380ba319 100644 --- a/mindspore/lite/nnacl/infer/assert_op_infer.c +++ 
b/mindspore/lite/nnacl/infer/assert_op_infer.c @@ -15,8 +15,11 @@ */ #include "nnacl/infer/assert_op_infer.h" +#include "nnacl/infer/infer_register.h" int AssertOpInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { return NNACL_OK; } + +REG_INFER(Assert, PrimType_Assert, AssertOpInferShape) diff --git a/mindspore/lite/nnacl/infer/assign_add_infer.c b/mindspore/lite/nnacl/infer/assign_add_infer.c index a52e7a8760..b4fdb352ba 100644 --- a/mindspore/lite/nnacl/infer/assign_add_infer.c +++ b/mindspore/lite/nnacl/infer/assign_add_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/assign_add_infer.h" +#include "nnacl/infer/infer_register.h" int AssignAddInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -35,3 +36,5 @@ int AssignAddInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor SetShapeTensor(out, x); return NNACL_OK; } + +REG_INFER(AssignAdd, PrimType_AssignAdd, AssignAddInferShape) diff --git a/mindspore/lite/nnacl/infer/assign_infer.c b/mindspore/lite/nnacl/infer/assign_infer.c index 9f83782e2e..5dcc5d4ff3 100644 --- a/mindspore/lite/nnacl/infer/assign_infer.c +++ b/mindspore/lite/nnacl/infer/assign_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/assign_infer.h" +#include "nnacl/infer/infer_register.h" int AssignInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -37,3 +38,5 @@ int AssignInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * } return NNACL_OK; } + +REG_INFER(Assign, PrimType_Assign, AssignInferShape) diff --git a/mindspore/lite/nnacl/infer/audio_spectrogram_infer.c b/mindspore/lite/nnacl/infer/audio_spectrogram_infer.c index c4e256ca41..8eaad1aa12 100644 --- a/mindspore/lite/nnacl/infer/audio_spectrogram_infer.c +++ b/mindspore/lite/nnacl/infer/audio_spectrogram_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/audio_spectrogram_infer.h" +#include "nnacl/infer/infer_register.h" int Log2Ceil(uint32_t length) { if (length == 0) { @@ -72,3 +73,5 @@ int AudioSpectrogramInferShape(const TensorC *const *inputs, size_t inputs_size, SetShapeArray(output, output_shape, 3); return NNACL_OK; } + +REG_INFER(AudioSpectrogram, PrimType_AudioSpectrogram, AudioSpectrogramInferShape) diff --git a/mindspore/lite/nnacl/infer/batch_to_space_infer.c b/mindspore/lite/nnacl/infer/batch_to_space_infer.c index b7910a7257..5746ddf714 100644 --- a/mindspore/lite/nnacl/infer/batch_to_space_infer.c +++ b/mindspore/lite/nnacl/infer/batch_to_space_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/batch_to_space_infer.h" +#include "nnacl/infer/infer_register.h" int SetOutputShapeFromParam(const TensorC *const *inputs, TensorC **outputs, OpParameter *parameter) { int input_shape[MAX_SHAPE_SIZE]; @@ -135,3 +136,5 @@ int BatchToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten return NNACL_OK; } + +REG_INFER(BatchToSpace, PrimType_BatchToSpace, BatchToSpaceInferShape) diff --git a/mindspore/lite/nnacl/infer/bias_grad_infer.c b/mindspore/lite/nnacl/infer/bias_grad_infer.c index a1206872d9..575a8f2f50 100644 --- a/mindspore/lite/nnacl/infer/bias_grad_infer.c +++ b/mindspore/lite/nnacl/infer/bias_grad_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/bias_grad_infer.h" +#include "nnacl/infer/infer_register.h" int BiasGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, 
OpParameter *parameter) { @@ -40,3 +41,5 @@ int BiasGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC return NNACL_OK; } + +REG_INFER(BiasAddGrad, PrimType_BiasAddGrad, BiasGradInferShape) diff --git a/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c b/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c index 55a6342138..f9f8fb234e 100644 --- a/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c +++ b/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/binary_cross_entropy_infer.h" +#include "nnacl/infer/infer_register.h" int BinaryCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -31,3 +32,5 @@ int BinaryCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_siz } return NNACL_OK; } + +REG_INFER(BinaryCrossEntropy, PrimType_BinaryCrossEntropy, BinaryCrossEntropyInferShape) diff --git a/mindspore/lite/nnacl/infer/bn_grad_infer.c b/mindspore/lite/nnacl/infer/bn_grad_infer.c index 50ab35f740..718a9421f1 100644 --- a/mindspore/lite/nnacl/infer/bn_grad_infer.c +++ b/mindspore/lite/nnacl/infer/bn_grad_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/bn_grad_infer.h" +#include "nnacl/infer/infer_register.h" int BnGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -39,3 +40,5 @@ int BnGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * SetDataTypeFormat(outputs[2], scale); return NNACL_OK; } + +REG_INFER(BatchNormGrad, PrimType_BatchNormGrad, BnGradInferShape) diff --git a/mindspore/lite/nnacl/infer/broadcast_to_infer.c b/mindspore/lite/nnacl/infer/broadcast_to_infer.c index 0580e3b301..6959bde80e 100644 --- a/mindspore/lite/nnacl/infer/broadcast_to_infer.c +++ b/mindspore/lite/nnacl/infer/broadcast_to_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/broadcast_to_infer.h" +#include "nnacl/infer/infer_register.h" int BroadcastToInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -66,3 +67,5 @@ int BroadcastToInferShape(const TensorC *const *inputs, size_t inputs_size, Tens SetShapeArray(outputs[0], shape, shape_size); return NNACL_OK; } + +REG_INFER(BroadcastTo, PrimType_BroadcastTo, BroadcastToInferShape) diff --git a/mindspore/lite/nnacl/infer/cast_infer.c b/mindspore/lite/nnacl/infer/cast_infer.c index db1afe0e42..2474d5cb0a 100644 --- a/mindspore/lite/nnacl/infer/cast_infer.c +++ b/mindspore/lite/nnacl/infer/cast_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/cast_infer.h" +#include "nnacl/infer/infer_register.h" int CastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -42,3 +43,5 @@ int CastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o SetShapeTensor(output, input); return NNACL_OK; } + +REG_INFER(Cast, PrimType_Cast, CastInferShape) diff --git a/mindspore/lite/nnacl/infer/common_infer.c b/mindspore/lite/nnacl/infer/common_infer.c index 0a21247c95..0a9be294c7 100644 --- a/mindspore/lite/nnacl/infer/common_infer.c +++ b/mindspore/lite/nnacl/infer/common_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/common_infer.h" #include #include +#include "nnacl/infer/infer_register.h" int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, vvector *tensor_shape) { // This function will create a new tensors_ @@ 
-437,3 +438,48 @@ void VectorCFree(VectorC *vc) { free(vc->data_); vc->data_ = NULL; } + +REG_INFER(Abs, PrimType_Abs, CommonInferShape) +REG_INFER(AbsGrad, PrimType_AbsGrad, CommonInferShape) +REG_INFER(Activation, PrimType_Activation, CommonInferShape) +REG_INFER(ActivationGrad, PrimType_ActivationGrad, CommonInferShape) +REG_INFER(BatchNorm, PrimType_BatchNorm, CommonInferShape) +REG_INFER(BinaryCrossEntropyGrad, PrimType_BinaryCrossEntropyGrad, CommonInferShape) +REG_INFER(BiasAdd, PrimType_BiasAdd, CommonInferShape) +REG_INFER(Ceil, PrimType_Ceil, CommonInferShape) +REG_INFER(Clip, PrimType_Clip, CommonInferShape) +REG_INFER(ControlDepend, PrimType_ControlDepend, CommonInferShape) +REG_INFER(Cos, PrimType_Cos, CommonInferShape) +REG_INFER(Depend, PrimType_Depend, CommonInferShape) +REG_INFER(Elu, PrimType_Elu, CommonInferShape) +REG_INFER(Erf, PrimType_Erf, CommonInferShape) +REG_INFER(Exp, PrimType_ExpFusion, CommonInferShape) +REG_INFER(FakeQuantWithMinMaxVars, PrimType_FakeQuantWithMinMaxVars, CommonInferShape) +REG_INFER(Floor, PrimType_Floor, CommonInferShape) +REG_INFER(If, PrimType_If, CommonInferShape) +REG_INFER(InstanceNorm, PrimType_InstanceNorm, CommonInferShape) +REG_INFER(IsFinite, PrimType_IsFinite, CommonInferShape) +REG_INFER(LeakyRelu, PrimType_LeakyRelu, CommonInferShape) +REG_INFER(Log, PrimType_Log, CommonInferShape) +REG_INFER(LogGrad, PrimType_LogGrad, CommonInferShape) +REG_INFER(LogicalNot, PrimType_LogicalNot, CommonInferShape) +REG_INFER(LRN, PrimType_LRN, CommonInferShape) +REG_INFER(L2Normalize, PrimType_L2NormalizeFusion, CommonInferShape) +REG_INFER(Neg, PrimType_Neg, CommonInferShape) +REG_INFER(NegGrad, PrimType_NegGrad, CommonInferShape) +REG_INFER(PowerGrad, PrimType_PowerGrad, CommonInferShape) +REG_INFER(PReLU, PrimType_PReLUFusion, CommonInferShape) +REG_INFER(Reciprocal, PrimType_Reciprocal, CommonInferShape) +REG_INFER(ReverseSequence, PrimType_ReverseSequence, CommonInferShape) +REG_INFER(Reverse, PrimType_ReverseV2, CommonInferShape) +REG_INFER(Round, PrimType_Round, CommonInferShape) +REG_INFER(Rsqrt, PrimType_Rsqrt, CommonInferShape) +REG_INFER(Scale, PrimType_ScaleFusion, CommonInferShape) +REG_INFER(SigmoidCrossEntropyWithLogits, PrimType_SigmoidCrossEntropyWithLogits, CommonInferShape) +REG_INFER(SigmoidCrossEntropyWithLogitsGrad, PrimType_SigmoidCrossEntropyWithLogitsGrad, CommonInferShape) +REG_INFER(Sin, PrimType_Sin, CommonInferShape) +REG_INFER(SmoothL1Loss, PrimType_SmoothL1Loss, CommonInferShape) +REG_INFER(SmoothL1LossGrad, PrimType_SmoothL1LossGrad, CommonInferShape) +REG_INFER(Sqrt, PrimType_Sqrt, CommonInferShape) +REG_INFER(Square, PrimType_Square, CommonInferShape) +REG_INFER(ZerosLike, PrimType_ZerosLike, CommonInferShape) diff --git a/mindspore/lite/nnacl/infer/concat_infer.c b/mindspore/lite/nnacl/infer/concat_infer.c index d5c3fa7d2d..4729a0aa87 100644 --- a/mindspore/lite/nnacl/infer/concat_infer.c +++ b/mindspore/lite/nnacl/infer/concat_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/concat_infer.h" +#include "nnacl/infer/infer_register.h" int ConcatInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -73,3 +74,5 @@ int ConcatInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * SetShapeArray(outputs[0], output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(Concat, PrimType_Concat, ConcatInferShape) diff --git a/mindspore/lite/nnacl/infer/constant_of_shape_infer.c 
b/mindspore/lite/nnacl/infer/constant_of_shape_infer.c index 2f182014f1..1dd438e29e 100644 --- a/mindspore/lite/nnacl/infer/constant_of_shape_infer.c +++ b/mindspore/lite/nnacl/infer/constant_of_shape_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/constant_of_shape_infer.h" +#include "nnacl/infer/infer_register.h" int ConstantOfShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -64,3 +65,5 @@ int ConstantOfShapeInferShape(const TensorC *const *inputs, size_t inputs_size, SetShapeArray(out_tensor, out_shape, out_shape_size); return NNACL_OK; } + +REG_INFER(ConstantOfShape, PrimType_ConstantOfShape, ConstantOfShapeInferShape) diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c b/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c index 6f2bfeeb8e..a28bf72a0a 100644 --- a/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c +++ b/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/conv2d_grad_filter_infer.h" +#include "nnacl/infer/infer_register.h" int Conv2dGradFilterInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -33,3 +34,5 @@ int Conv2dGradFilterInferShape(const TensorC *const *inputs, size_t inputs_size, SetShapeArray(outputs[0], filter_shape_, filter_shape_size_); return NNACL_OK; } + +REG_INFER(Conv2DBackpropFilterFusion, PrimType_Conv2DBackpropFilterFusion, Conv2dGradFilterInferShape) diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c b/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c index 5aa6f929a4..9b7d43c4a2 100644 --- a/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c +++ b/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/conv2d_grad_input_infer.h" +#include "nnacl/infer/infer_register.h" int Conv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -39,3 +40,5 @@ int Conv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, return NNACL_OK; } + +REG_INFER(Conv2DBackpropInputFusion, PrimType_Conv2DBackpropInputFusion, Conv2dGradInputInferShape) diff --git a/mindspore/lite/nnacl/infer/conv2d_infer.c b/mindspore/lite/nnacl/infer/conv2d_infer.c index 3e807db8e4..d945a2be47 100644 --- a/mindspore/lite/nnacl/infer/conv2d_infer.c +++ b/mindspore/lite/nnacl/infer/conv2d_infer.c @@ -14,6 +14,7 @@ * limitations under the License. 
*/ #include "nnacl/infer/conv2d_infer.h" +#include "nnacl/infer/infer_register.h" void ConvInferShape(int input_h, int input_w, int *output_h, int *output_w, ConvParameter *param) { int kernel_w = param->kernel_w_; @@ -100,3 +101,6 @@ int Conv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * return NNACL_OK; } + +REG_INFER(Adder, PrimType_AdderFusion, Conv2dInferShape) +REG_INFER(Conv2D, PrimType_Conv2DFusion, Conv2dInferShape) diff --git a/mindspore/lite/nnacl/infer/crop_and_resize_infer.c b/mindspore/lite/nnacl/infer/crop_and_resize_infer.c index 879be97d4a..5b4f39c590 100644 --- a/mindspore/lite/nnacl/infer/crop_and_resize_infer.c +++ b/mindspore/lite/nnacl/infer/crop_and_resize_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/crop_and_resize_infer.h" +#include "nnacl/infer/infer_register.h" int CropAndResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -56,3 +57,5 @@ int CropAndResizeInferShape(const TensorC *const *inputs, size_t inputs_size, Te SetShapeArray(output, output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(CropAndResize, PrimType_CropAndResize, CropAndResizeInferShape) diff --git a/mindspore/lite/nnacl/infer/crop_infer.c b/mindspore/lite/nnacl/infer/crop_infer.c index db5545dc0e..b1e70c1df5 100644 --- a/mindspore/lite/nnacl/infer/crop_infer.c +++ b/mindspore/lite/nnacl/infer/crop_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/crop_infer.h" +#include "nnacl/infer/infer_register.h" int CropInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -32,3 +33,5 @@ int CropInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o SetShapeTensor(outputs[0], inputs[1]); return NNACL_OK; } + +REG_INFER(Crop, PrimType_Crop, CropInferShape) diff --git a/mindspore/lite/nnacl/infer/custom_extract_features_infer.c b/mindspore/lite/nnacl/infer/custom_extract_features_infer.c index 72539bdbea..7edbcf0ae2 100644 --- a/mindspore/lite/nnacl/infer/custom_extract_features_infer.c +++ b/mindspore/lite/nnacl/infer/custom_extract_features_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/custom_extract_features_infer.h" +#include "nnacl/infer/infer_register.h" int CustomExtractFeaturesInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -46,3 +47,5 @@ int CustomExtractFeaturesInferShape(const TensorC *const *inputs, size_t inputs_ output1->shape_[0] = res; return NNACL_OK; } + +REG_INFER(CustomExtractFeatures, PrimType_CustomExtractFeatures, CustomExtractFeaturesInferShape) diff --git a/mindspore/lite/nnacl/infer/custom_normalize_infer.c b/mindspore/lite/nnacl/infer/custom_normalize_infer.c index 51909b66ed..143d405af8 100644 --- a/mindspore/lite/nnacl/infer/custom_normalize_infer.c +++ b/mindspore/lite/nnacl/infer/custom_normalize_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/custom_normalize_infer.h" +#include "nnacl/infer/infer_register.h" int CustomNormalizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -39,3 +40,5 @@ int CustomNormalizeInferShape(const TensorC *const *inputs, size_t inputs_size, output->shape_[0] = (string_num == 0 ? 
1 : string_num); return NNACL_OK; } + +REG_INFER(CustomNormalize, PrimType_CustomNormalize, CustomNormalizeInferShape) diff --git a/mindspore/lite/nnacl/infer/custom_predict_infer.c b/mindspore/lite/nnacl/infer/custom_predict_infer.c index 6ece4be71f..5633823c57 100644 --- a/mindspore/lite/nnacl/infer/custom_predict_infer.c +++ b/mindspore/lite/nnacl/infer/custom_predict_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/custom_predict_infer.h" +#include "nnacl/infer/infer_register.h" int CustomPredictInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -40,3 +41,5 @@ int CustomPredictInferShape(const TensorC *const *inputs, size_t inputs_size, Te output1->format_ = input->format_; return NNACL_OK; } + +REG_INFER(CustomPredict, PrimType_CustomPredict, CustomPredictInferShape) diff --git a/mindspore/lite/nnacl/infer/deconv2d_infer.c b/mindspore/lite/nnacl/infer/deconv2d_infer.c index 1c39ea48f5..5f77ecc1f7 100644 --- a/mindspore/lite/nnacl/infer/deconv2d_infer.c +++ b/mindspore/lite/nnacl/infer/deconv2d_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/deconv2d_infer.h" +#include "nnacl/infer/infer_register.h" int Deconv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -97,3 +98,5 @@ int Deconv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC param->output_channel_ = output_c; return NNACL_OK; } + +REG_INFER(Conv2dTranspose, PrimType_Conv2dTransposeFusion, Deconv2dInferShape) diff --git a/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c b/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c index 20ac637687..eaef11caf4 100644 --- a/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c +++ b/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/dedepthwise_conv2d_infer.h" +#include "nnacl/infer/infer_register.h" int DeDepthwiseConv2DInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { diff --git a/mindspore/lite/nnacl/infer/depth_to_space_infer.c b/mindspore/lite/nnacl/infer/depth_to_space_infer.c index 4f7fc277bf..65a1105c78 100644 --- a/mindspore/lite/nnacl/infer/depth_to_space_infer.c +++ b/mindspore/lite/nnacl/infer/depth_to_space_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/depth_to_space_infer.h" +#include "nnacl/infer/infer_register.h" int DepthToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -54,3 +55,5 @@ int DepthToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten SetShapeArray(outputs[0], output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(DepthToSpace, PrimType_DepthToSpace, DepthToSpaceInferShape) diff --git a/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c b/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c index 7ae3aeb564..866524d127 100644 --- a/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c +++ b/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/depthwise_conv2d_infer.h" +#include "nnacl/infer/infer_register.h" int DepthwiseConv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { diff --git a/mindspore/lite/nnacl/infer/detection_post_process_infer.c b/mindspore/lite/nnacl/infer/detection_post_process_infer.c index 
aff19762c1..f5cd701655 100644 --- a/mindspore/lite/nnacl/infer/detection_post_process_infer.c +++ b/mindspore/lite/nnacl/infer/detection_post_process_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/detection_post_process_infer.h" +#include "nnacl/infer/infer_register.h" int DetectionPostProcessInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -77,3 +78,5 @@ int DetectionPostProcessInferShape(const TensorC *const *inputs, size_t inputs_s return NNACL_OK; } + +REG_INFER(DetectionPostProcess, PrimType_DetectionPostProcess, DetectionPostProcessInferShape) diff --git a/mindspore/lite/nnacl/infer/dropout_grad_infer.c b/mindspore/lite/nnacl/infer/dropout_grad_infer.c index 7d7f8e2ce0..9afe933267 100644 --- a/mindspore/lite/nnacl/infer/dropout_grad_infer.c +++ b/mindspore/lite/nnacl/infer/dropout_grad_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/dropout_grad_infer.h" +#include "nnacl/infer/infer_register.h" int DropoutGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -34,3 +35,5 @@ int DropoutGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tens SetShapeTensor(output, input); return NNACL_OK; } + +REG_INFER(DropoutGrad, PrimType_DropoutGrad, DropoutGradInferShape) diff --git a/mindspore/lite/nnacl/infer/dropout_infer.c b/mindspore/lite/nnacl/infer/dropout_infer.c index d2d32becd4..3645b5d5ee 100644 --- a/mindspore/lite/nnacl/infer/dropout_infer.c +++ b/mindspore/lite/nnacl/infer/dropout_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/dropout_infer.h" +#include "nnacl/infer/infer_register.h" int DropoutInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -39,3 +40,5 @@ int DropoutInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC } return NNACL_OK; } + +REG_INFER(Dropout, PrimType_Dropout, DropoutInferShape) diff --git a/mindspore/lite/nnacl/infer/embedding_lookup_infer.c b/mindspore/lite/nnacl/infer/embedding_lookup_infer.c index fbc16e0934..e838b84ec2 100644 --- a/mindspore/lite/nnacl/infer/embedding_lookup_infer.c +++ b/mindspore/lite/nnacl/infer/embedding_lookup_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/embedding_lookup_infer.h" +#include "nnacl/infer/infer_register.h" int EmbeddingLookupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -59,3 +60,5 @@ int EmbeddingLookupInferShape(const TensorC *const *inputs, size_t inputs_size, SetShapeArray(output, output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(EmbeddingLookup, PrimType_EmbeddingLookupFusion, EmbeddingLookupInferShape) diff --git a/mindspore/lite/nnacl/infer/expand_dims_infer.c b/mindspore/lite/nnacl/infer/expand_dims_infer.c index 8d22a05902..605188d62d 100644 --- a/mindspore/lite/nnacl/infer/expand_dims_infer.c +++ b/mindspore/lite/nnacl/infer/expand_dims_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/expand_dims_infer.h" +#include "nnacl/infer/infer_register.h" int ExpandDimsInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -44,3 +45,5 @@ int ExpandDimsInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso ShapeInsert(output->shape_, &(output->shape_size_), dim, 1); return NNACL_OK; } + +REG_INFER(ExpandDims, PrimType_ExpandDims, ExpandDimsInferShape) diff 
--git a/mindspore/lite/nnacl/infer/fft_imag_infer.c b/mindspore/lite/nnacl/infer/fft_imag_infer.c index 81bf648e7a..d794d7086e 100644 --- a/mindspore/lite/nnacl/infer/fft_imag_infer.c +++ b/mindspore/lite/nnacl/infer/fft_imag_infer.c @@ -15,8 +15,11 @@ */ #include "nnacl/infer/fft_imag_infer.h" +#include "nnacl/infer/infer_register.h" int FftImagInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { return FftInferShape(inputs, inputs_size, outputs, outputs_size, parameter); } + +REG_INFER(FftImag, PrimType_FftImag, FftImagInferShape) diff --git a/mindspore/lite/nnacl/infer/fft_real_infer.c b/mindspore/lite/nnacl/infer/fft_real_infer.c index fcd4cc1a50..cd95ebb596 100644 --- a/mindspore/lite/nnacl/infer/fft_real_infer.c +++ b/mindspore/lite/nnacl/infer/fft_real_infer.c @@ -15,8 +15,11 @@ */ #include "nnacl/infer/fft_real_infer.h" +#include "nnacl/infer/infer_register.h" int FftRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { return FftInferShape(inputs, inputs_size, outputs, outputs_size, parameter); } + +REG_INFER(FftReal, PrimType_FftReal, FftRealInferShape) diff --git a/mindspore/lite/nnacl/infer/fill_infer.c b/mindspore/lite/nnacl/infer/fill_infer.c index 4e6a9d478d..8359cdda46 100644 --- a/mindspore/lite/nnacl/infer/fill_infer.c +++ b/mindspore/lite/nnacl/infer/fill_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/fill_infer.h" +#include "nnacl/infer/infer_register.h" int FillInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -46,3 +47,5 @@ int FillInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o SetShapeArray(output, output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(Fill, PrimType_Fill, FillInferShape) diff --git a/mindspore/lite/nnacl/infer/flatten_grad_infer.c b/mindspore/lite/nnacl/infer/flatten_grad_infer.c index ddbd1f5858..1a28928674 100644 --- a/mindspore/lite/nnacl/infer/flatten_grad_infer.c +++ b/mindspore/lite/nnacl/infer/flatten_grad_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/flatten_grad_infer.h" +#include "nnacl/infer/infer_register.h" int FlattenGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -37,3 +38,5 @@ int FlattenGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tens SetShapeArray(output, (int *)(inputs[1]->data_), output_shape_size); return NNACL_OK; } + +REG_INFER(FlattenGrad, PrimType_FlattenGrad, FlattenGradInferShape) diff --git a/mindspore/lite/nnacl/infer/flatten_infer.c b/mindspore/lite/nnacl/infer/flatten_infer.c index d6ac84691f..d1de665d96 100644 --- a/mindspore/lite/nnacl/infer/flatten_infer.c +++ b/mindspore/lite/nnacl/infer/flatten_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/flatten_infer.h" +#include "nnacl/infer/infer_register.h" int FlattenInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -45,3 +46,5 @@ int FlattenInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC SetShapeArray(output, output_shape, 2); return NNACL_OK; } + +REG_INFER(Flatten, PrimType_Flatten, FlattenInferShape) diff --git a/mindspore/lite/nnacl/infer/full_connection_infer.c b/mindspore/lite/nnacl/infer/full_connection_infer.c index fda4009b03..98b4f3f6c3 100644 --- 
a/mindspore/lite/nnacl/infer/full_connection_infer.c +++ b/mindspore/lite/nnacl/infer/full_connection_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/full_connection_infer.h" +#include "nnacl/infer/infer_register.h" int FullConnectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -75,3 +76,5 @@ int FullConnectionInferShape(const TensorC *const *inputs, size_t inputs_size, T return NNACL_OK; } + +REG_INFER(FullConnection, PrimType_FullConnection, FullConnectionInferShape) diff --git a/mindspore/lite/nnacl/infer/fused_batchnorm_infer.c b/mindspore/lite/nnacl/infer/fused_batchnorm_infer.c index d3428bf440..c9b09a280d 100644 --- a/mindspore/lite/nnacl/infer/fused_batchnorm_infer.c +++ b/mindspore/lite/nnacl/infer/fused_batchnorm_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/fused_batchnorm_infer.h" +#include "nnacl/infer/infer_register.h" int FusedBatchNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -32,3 +33,5 @@ int FusedBatchNormInferShape(const TensorC *const *inputs, size_t inputs_size, T } return 0; } + +REG_INFER(FusedBatchNorm, PrimType_FusedBatchNorm, FusedBatchNormInferShape) diff --git a/mindspore/lite/nnacl/infer/gather_infer.c b/mindspore/lite/nnacl/infer/gather_infer.c index 229faf81d9..f7c86d8cb9 100644 --- a/mindspore/lite/nnacl/infer/gather_infer.c +++ b/mindspore/lite/nnacl/infer/gather_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/gather_infer.h" +#include "nnacl/infer/infer_register.h" int GatherInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -57,3 +58,5 @@ int GatherInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * SetShapeArray(output, out_shape, out_shape_size); return NNACL_OK; } + +REG_INFER(Gather, PrimType_Gather, GatherInferShape) diff --git a/mindspore/lite/nnacl/infer/gather_nd_infer.c b/mindspore/lite/nnacl/infer/gather_nd_infer.c index 13db7b7443..9811cc5314 100644 --- a/mindspore/lite/nnacl/infer/gather_nd_infer.c +++ b/mindspore/lite/nnacl/infer/gather_nd_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/gather_nd_infer.h" +#include "nnacl/infer/infer_register.h" int GatherNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -50,3 +51,5 @@ int GatherNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC SetShapeArray(output, out_shape, out_shape_size); return NNACL_OK; } + +REG_INFER(GatherNd, PrimType_GatherNd, GatherNdInferShape) diff --git a/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c b/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c index 012eb49808..007ceaaf79 100644 --- a/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c +++ b/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/group_conv2d_grad_input_infer.h" +#include "nnacl/infer/infer_register.h" int GroupConv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { diff --git a/mindspore/lite/nnacl/infer/gru_infer.c b/mindspore/lite/nnacl/infer/gru_infer.c index e5a6b3a2e3..4013169a32 100644 --- a/mindspore/lite/nnacl/infer/gru_infer.c +++ b/mindspore/lite/nnacl/infer/gru_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/gru_infer.h" +#include 
"nnacl/infer/infer_register.h" int GruInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -80,3 +81,5 @@ int GruInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou SetShapeArray(outputs[1], state_shape, state_shape_size); return NNACL_OK; } + +REG_INFER(GRU, PrimType_GRU, GruInferShape) diff --git a/mindspore/lite/nnacl/infer/hashtable_lookup_infer.c b/mindspore/lite/nnacl/infer/hashtable_lookup_infer.c index 077d293043..aba93084c3 100644 --- a/mindspore/lite/nnacl/infer/hashtable_lookup_infer.c +++ b/mindspore/lite/nnacl/infer/hashtable_lookup_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/hashtable_lookup_infer.h" +#include "nnacl/infer/infer_register.h" int HashtableLoopupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -42,3 +43,5 @@ int HashtableLoopupInferShape(const TensorC *const *inputs, size_t inputs_size, } return NNACL_OK; } + +REG_INFER(HashtableLookup, PrimType_HashtableLookup, HashtableLoopupInferShape) diff --git a/mindspore/lite/nnacl/infer/infer.h b/mindspore/lite/nnacl/infer/infer.h new file mode 100644 index 0000000000..ba89bedbae --- /dev/null +++ b/mindspore/lite/nnacl/infer/infer.h @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_INFER_INFER_H_ +#define MINDSPORE_LITE_NNACL_INFER_INFER_H_ + +#include "nnacl/tensor_c.h" +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (*InferShape)(const TensorC *const *inputs, size_t input_size, TensorC **outputs, size_t output_size, + OpParameter *parameter); + +InferShape GetInferFunc(int prim_type); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_INFER_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/infer_register.c b/mindspore/lite/nnacl/infer/infer_register.c new file mode 100644 index 0000000000..b5ebf09b60 --- /dev/null +++ b/mindspore/lite/nnacl/infer/infer_register.c @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "nnacl/infer/infer_register.h" + +InferShape g_infer_func[PrimType_MAX]; + +InferShape GetInferFunc(int prim_type) { + if (prim_type < PrimType_MAX) { + return g_infer_func[prim_type]; + } + return NULL; +} + +void RegInfer(int prim_type, InferShape func) { + if (prim_type < PrimType_MAX) { + g_infer_func[prim_type] = func; + } +} diff --git a/mindspore/lite/nnacl/infer/infer_register.h b/mindspore/lite/nnacl/infer/infer_register.h new file mode 100644 index 0000000000..7e2ca85535 --- /dev/null +++ b/mindspore/lite/nnacl/infer/infer_register.h @@ -0,0 +1,226 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_INFER_INFER_REGISTER_H_ +#define MINDSPORE_LITE_NNACL_INFER_INFER_REGISTER_H_ + +#include "nnacl/tensor_c.h" +#include "nnacl/op_base.h" +#include "nnacl/infer/infer.h" + +#ifdef __cplusplus +extern "C" { +#endif +enum PrimType { + PrimType_NONE = 0, + PrimType_Abs = 1, + PrimType_Activation = 2, + PrimType_ActivationGrad = 3, + PrimType_Adam = 4, + PrimType_AddFusion = 5, + PrimType_AdderFusion = 6, + PrimType_AddGrad = 7, + PrimType_AddN = 8, + PrimType_All = 9, + PrimType_ApplyMomentum = 10, + PrimType_ArgMaxFusion = 11, + PrimType_ArgMinFusion = 12, + PrimType_Assert = 13, + PrimType_Assign = 14, + PrimType_AssignAdd = 15, + PrimType_AudioSpectrogram = 16, + PrimType_AvgPoolFusion = 17, + PrimType_AvgPoolGrad = 18, + PrimType_BatchNorm = 19, + PrimType_BatchNormGrad = 20, + PrimType_BatchToSpace = 21, + PrimType_BatchToSpaceND = 22, + PrimType_BiasAdd = 23, + PrimType_BinaryCrossEntropy = 24, + PrimType_BinaryCrossEntropyGrad = 25, + PrimType_BiasAddGrad = 26, + PrimType_BroadcastTo = 27, + PrimType_Cast = 28, + PrimType_Ceil = 29, + PrimType_Clip = 30, + PrimType_Concat = 31, + PrimType_ControlDepend = 32, + PrimType_Conv2DBackpropFilterFusion = 33, + PrimType_Conv2DBackpropInputFusion = 34, + PrimType_Conv2DFusion = 35, + PrimType_Conv2dTransposeFusion = 36, + PrimType_Cos = 37, + PrimType_ConstantOfShape = 38, + PrimType_Crop = 39, + PrimType_CustomExtractFeatures = 40, + PrimType_CustomNormalize = 41, + PrimType_CustomPredict = 42, + PrimType_DeConv2DGradFilter = 43, + PrimType_Depend = 44, + PrimType_DepthToSpace = 45, + PrimType_DetectionPostProcess = 46, + PrimType_DivFusion = 47, + PrimType_DivGrad = 48, + PrimType_Dropout = 49, + PrimType_DropoutGrad = 50, + PrimType_Elu = 51, + PrimType_Eltwise = 52, + PrimType_Equal = 53, + PrimType_EmbeddingLookupFusion = 54, + PrimType_ExpFusion = 55, + PrimType_ExpandDims = 56, + PrimType_FakeQuantWithMinMaxVars = 57, + PrimType_FakeQuantWithMinMaxVarsPerChannel = 58, + PrimType_FftReal = 59, + PrimType_FftImag = 60, + PrimType_Flatten = 61, + PrimType_FlattenGrad = 62, + PrimType_Floor = 63, + PrimType_FloorDiv = 64, + PrimType_FloorMod = 65, + PrimType_Fill = 66, + PrimType_FullConnection = 67, + PrimType_FusedBatchNorm = 68, + PrimType_Gather = 69, + PrimType_GatherNd = 70, + PrimType_Greater = 71, + PrimType_GreaterEqual 
= 72, + PrimType_HashtableLookup = 73, + PrimType_InstanceNorm = 74, + PrimType_LayerNormFusion = 75, + PrimType_LeakyRelu = 76, + PrimType_Less = 77, + PrimType_LessEqual = 78, + PrimType_Log = 79, + PrimType_LogGrad = 80, + PrimType_LogicalAnd = 81, + PrimType_LogicalNot = 82, + PrimType_LogicalOr = 83, + PrimType_LpNormalization = 84, + PrimType_LRN = 85, + PrimType_LshProjection = 86, + PrimType_LSTM = 87, + PrimType_L2NormalizeFusion = 88, + PrimType_MatMul = 89, + PrimType_Maximum = 90, + PrimType_MaximumGrad = 91, + PrimType_MaxPoolFusion = 92, + PrimType_MaxPoolGrad = 93, + PrimType_Merge = 94, + PrimType_Mfcc = 95, + PrimType_Minimum = 96, + PrimType_MinimumGrad = 97, + PrimType_Mod = 98, + PrimType_MulFusion = 99, + PrimType_MulGrad = 100, + PrimType_Neg = 101, + PrimType_NegGrad = 102, + PrimType_NotEqual = 103, + PrimType_NonMaxSuppression = 104, + PrimType_OneHot = 105, + PrimType_OnesLike = 106, + PrimType_PadFusion = 107, + PrimType_PartialFusion = 108, + PrimType_PowerGrad = 109, + PrimType_PowFusion = 110, + PrimType_PriorBox = 111, + PrimType_PReLUFusion = 112, + PrimType_QuantDTypeCast = 113, + PrimType_Rank = 114, + PrimType_Range = 115, + PrimType_Reciprocal = 116, + PrimType_RealDiv = 117, + PrimType_ReduceFusion = 118, + PrimType_Reshape = 119, + PrimType_Resize = 120, + PrimType_ReverseSequence = 121, + PrimType_ReverseV2 = 122, + PrimType_Rfft = 123, + PrimType_ROIPooling = 124, + PrimType_Round = 125, + PrimType_Rsqrt = 126, + PrimType_ScaleFusion = 127, + PrimType_ScatterNd = 128, + PrimType_SGD = 129, + PrimType_Shape = 130, + PrimType_SigmoidCrossEntropyWithLogits = 131, + PrimType_SigmoidCrossEntropyWithLogitsGrad = 132, + PrimType_Sin = 133, + PrimType_SkipGram = 134, + PrimType_SliceFusion = 135, + PrimType_SmoothL1Loss = 136, + PrimType_SmoothL1LossGrad = 137, + PrimType_Softmax = 138, + PrimType_SoftmaxCrossEntropyWithLogits = 139, + PrimType_SpaceToBatch = 140, + PrimType_SpaceToBatchND = 141, + PrimType_SpaceToDepth = 142, + PrimType_SparseSoftmaxCrossEntropyWithLogits = 143, + PrimType_SparseToDense = 144, + PrimType_Split = 145, + PrimType_Sqrt = 146, + PrimType_Squeeze = 147, + PrimType_Square = 148, + PrimType_SquaredDifference = 149, + PrimType_Stack = 150, + PrimType_StridedSlice = 151, + PrimType_SubFusion = 152, + PrimType_SubGrad = 153, + PrimType_Switch = 154, + PrimType_TensorListFromTensor = 155, + PrimType_TensorListGetItem = 156, + PrimType_TensorListReserve = 157, + PrimType_TensorListSetItem = 158, + PrimType_TensorListStack = 159, + PrimType_TileFusion = 160, + PrimType_TopKFusion = 161, + PrimType_Transpose = 162, + PrimType_Unique = 163, + PrimType_UnsortedSegmentSum = 164, + PrimType_Unsqueeze = 165, + PrimType_Unstack = 166, + PrimType_While = 167, + PrimType_Where = 168, + PrimType_ZerosLike = 169, + PrimType_Select = 170, + PrimType_If = 171, + PrimType_GRU = 172, + PrimType_NonZero = 173, + PrimType_InvertPermutation = 174, + PrimType_Size = 175, + PrimType_RandomStandardNormal = 176, + PrimType_CropAndResize = 177, + PrimType_Erf = 178, + PrimType_StridedSliceGrad = 179, + PrimType_IsFinite = 180, + PrimType_LinSpace = 181, + PrimType_UniformReal = 182, + PrimType_AbsGrad = 183, + PrimType_RsqrtGrad = 184, + PrimType_SqrtGrad = 185, + PrimType_LayerNormGrad = 186, + PrimType_ResizeGrad = 187, + PrimType_MIN = PrimType_NONE, + PrimType_MAX = PrimType_ResizeGrad +}; + +void RegInfer(int prim_type, InferShape func); + +#define REG_INFER(op, type, func) \ + __attribute__((constructor)) void Reg##op##Infer() { RegInfer(type, func); } 
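/*
 * For reference (editor's annotation, not an added line of this header): with the
 * macro above, a registration written in an operator's .c file, e.g. the one this
 * patch adds in crop_infer.c,
 *
 *   REG_INFER(Crop, PrimType_Crop, CropInferShape)
 *
 * expands to a GCC/Clang constructor that fills the lookup table before main() runs:
 *
 *   __attribute__((constructor)) void RegCropInfer() { RegInfer(PrimType_Crop, CropInferShape); }
 *
 * so a later GetInferFunc(PrimType_Crop) call returns CropInferShape.
 */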
+#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_INFER_INFER_REGISTER_H_ diff --git a/mindspore/lite/nnacl/infer/invert_permutation_infer.c b/mindspore/lite/nnacl/infer/invert_permutation_infer.c index e72c6917ce..dc24facc69 100644 --- a/mindspore/lite/nnacl/infer/invert_permutation_infer.c +++ b/mindspore/lite/nnacl/infer/invert_permutation_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/invert_permutation_infer.h" +#include "nnacl/infer/infer_register.h" int InvertPermutationInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -40,3 +41,5 @@ int InvertPermutationInferShape(const TensorC *const *inputs, size_t inputs_size SetShapeTensor(output, input); return NNACL_OK; } + +REG_INFER(InvertPermutation, PrimType_InvertPermutation, InvertPermutationInferShape) diff --git a/mindspore/lite/nnacl/infer/layer_norm_grad_infer.c b/mindspore/lite/nnacl/infer/layer_norm_grad_infer.c index 7e0524ce97..880fe1f411 100644 --- a/mindspore/lite/nnacl/infer/layer_norm_grad_infer.c +++ b/mindspore/lite/nnacl/infer/layer_norm_grad_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/layer_norm_grad_infer.h" #include "nnacl/infer/common_infer.h" #include "nnacl/fp32_grad/layernormgrad_parameter.h" +#include "nnacl/infer/infer_register.h" int LayerNormGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -46,3 +47,5 @@ int LayerNormGradInferShape(const TensorC *const *inputs, size_t inputs_size, Te output_dg->shape_size_ = size; return NNACL_OK; } + +REG_INFER(LayerNormGrad, PrimType_LayerNormGrad, LayerNormGradInferShape) diff --git a/mindspore/lite/nnacl/infer/layer_norm_infer.c b/mindspore/lite/nnacl/infer/layer_norm_infer.c index ee6adbdd33..1b132b9c0f 100644 --- a/mindspore/lite/nnacl/infer/layer_norm_infer.c +++ b/mindspore/lite/nnacl/infer/layer_norm_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/layer_norm_infer.h" +#include "nnacl/infer/infer_register.h" int LayerNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -55,3 +56,5 @@ int LayerNormInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor return NNACL_OK; } + +REG_INFER(LayerNormFusion, PrimType_LayerNormFusion, LayerNormInferShape) diff --git a/mindspore/lite/nnacl/infer/lin_space_infer.c b/mindspore/lite/nnacl/infer/lin_space_infer.c index 60d31a0e4e..3698f70b5c 100644 --- a/mindspore/lite/nnacl/infer/lin_space_infer.c +++ b/mindspore/lite/nnacl/infer/lin_space_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/lin_space_infer.h" +#include "nnacl/infer/infer_register.h" int LinSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -41,3 +42,5 @@ int LinSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC output->shape_[0] = num[0]; return NNACL_OK; } + +REG_INFER(LinSpace, PrimType_LinSpace, LinSpaceInferShape) diff --git a/mindspore/lite/nnacl/infer/lsh_projection_infer.c b/mindspore/lite/nnacl/infer/lsh_projection_infer.c index 47f7aa66f3..e2dea5ad31 100644 --- a/mindspore/lite/nnacl/infer/lsh_projection_infer.c +++ b/mindspore/lite/nnacl/infer/lsh_projection_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/lsh_projection_infer.h" +#include "nnacl/infer/infer_register.h" int LshProjectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t 
outputs_size, OpParameter *parameter) { @@ -49,3 +50,5 @@ int LshProjectionInferShape(const TensorC *const *inputs, size_t inputs_size, Te SetShapeArray(out_tensor, out_shape, out_shape_size); return NNACL_OK; } + +REG_INFER(LshProjection, PrimType_LshProjection, LshProjectionInferShape) diff --git a/mindspore/lite/nnacl/infer/lstm_infer.c b/mindspore/lite/nnacl/infer/lstm_infer.c index e9ed86f7e5..2dc7ee7132 100644 --- a/mindspore/lite/nnacl/infer/lstm_infer.c +++ b/mindspore/lite/nnacl/infer/lstm_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/lstm_infer.h" +#include "nnacl/infer/infer_register.h" int LstmInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -63,3 +64,5 @@ int LstmInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o return NNACL_OK; } + +REG_INFER(LSTM, PrimType_LSTM, LstmInferShape) diff --git a/mindspore/lite/nnacl/infer/matmul_infer.c b/mindspore/lite/nnacl/infer/matmul_infer.c index c0e21beff9..fff6ff5577 100644 --- a/mindspore/lite/nnacl/infer/matmul_infer.c +++ b/mindspore/lite/nnacl/infer/matmul_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/matmul_infer.h" +#include "nnacl/infer/infer_register.h" int MatmulInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -84,3 +85,5 @@ int MatmulInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * SetShapeArray(output, c_shape, c_shape_size); return NNACL_OK; } + +REG_INFER(MatMul, PrimType_MatMul, MatmulInferShape) diff --git a/mindspore/lite/nnacl/infer/maximum_grad_infer.c b/mindspore/lite/nnacl/infer/maximum_grad_infer.c index 53c6793d5f..c06774e1a9 100644 --- a/mindspore/lite/nnacl/infer/maximum_grad_infer.c +++ b/mindspore/lite/nnacl/infer/maximum_grad_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/maximum_grad_infer.h" #include "nnacl/arithmetic.h" +#include "nnacl/infer/infer_register.h" int MaximumGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -58,3 +59,5 @@ int MaximumGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tens SetDataTypeFormat(dx2, dy); return NNACL_OK; } + +REG_INFER(MaximumGrad, PrimType_MaximumGrad, MaximumGradInferShape) diff --git a/mindspore/lite/nnacl/infer/mean_infer.c b/mindspore/lite/nnacl/infer/mean_infer.c index f9f056af50..6ad78f3fb2 100644 --- a/mindspore/lite/nnacl/infer/mean_infer.c +++ b/mindspore/lite/nnacl/infer/mean_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/mean_infer.h" +#include "nnacl/infer/infer_register.h" int MeanInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { diff --git a/mindspore/lite/nnacl/infer/merge_infer.c b/mindspore/lite/nnacl/infer/merge_infer.c index 7660f169db..7de9a12cba 100644 --- a/mindspore/lite/nnacl/infer/merge_infer.c +++ b/mindspore/lite/nnacl/infer/merge_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/merge_infer.h" #include +#include "nnacl/infer/infer_register.h" int MergeAbleToInfer(const TensorC *const *inputs, size_t inputs_size) { for (size_t i = 0; i < inputs_size; i++) { @@ -94,3 +95,5 @@ int MergeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** return NNACL_INFER_INVALID; } + +REG_INFER(Merge, PrimType_Merge, MergeInferShape) diff --git a/mindspore/lite/nnacl/infer/mfcc_infer.c b/mindspore/lite/nnacl/infer/mfcc_infer.c index 
41b0b3eaff..bae34ada0e 100644 --- a/mindspore/lite/nnacl/infer/mfcc_infer.c +++ b/mindspore/lite/nnacl/infer/mfcc_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/mfcc_infer.h" +#include "nnacl/infer/infer_register.h" int MfccInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -44,3 +45,5 @@ int MfccInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o output->shape_[2] = param->dct_coeff_num_; return NNACL_OK; } + +REG_INFER(Mfcc, PrimType_Mfcc, MfccInferShape) diff --git a/mindspore/lite/nnacl/infer/non_max_suppression_infer.c b/mindspore/lite/nnacl/infer/non_max_suppression_infer.c index 4e5313ae09..72e27c6e5d 100644 --- a/mindspore/lite/nnacl/infer/non_max_suppression_infer.c +++ b/mindspore/lite/nnacl/infer/non_max_suppression_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/non_max_suppression_infer.h" +#include "nnacl/infer/infer_register.h" int NonMaxSuppressionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -31,3 +32,5 @@ int NonMaxSuppressionInferShape(const TensorC *const *inputs, size_t inputs_size output->format_ = input->format_; return NNACL_INFER_INVALID; } + +REG_INFER(NonMaxSuppression, PrimType_NonMaxSuppression, NonMaxSuppressionInferShape) diff --git a/mindspore/lite/nnacl/infer/one_hot_infer.c b/mindspore/lite/nnacl/infer/one_hot_infer.c index dec08de885..f001f03adb 100644 --- a/mindspore/lite/nnacl/infer/one_hot_infer.c +++ b/mindspore/lite/nnacl/infer/one_hot_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/one_hot_infer.h" +#include "nnacl/infer/infer_register.h" int OneHotInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -54,3 +55,5 @@ int OneHotInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * return NNACL_OK; } + +REG_INFER(OneHot, PrimType_OneHot, OneHotInferShape) diff --git a/mindspore/lite/nnacl/infer/pad_infer.c b/mindspore/lite/nnacl/infer/pad_infer.c index eaf328f3b8..97651cb714 100644 --- a/mindspore/lite/nnacl/infer/pad_infer.c +++ b/mindspore/lite/nnacl/infer/pad_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/pad_infer.h" +#include "nnacl/infer/infer_register.h" int PadInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -59,3 +60,5 @@ int PadInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou SetShapeArray(output, output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(Pad, PrimType_PadFusion, PadInferShape) diff --git a/mindspore/lite/nnacl/infer/partial_infer.c b/mindspore/lite/nnacl/infer/partial_infer.c index 5fa89a3b8e..8e235fab04 100644 --- a/mindspore/lite/nnacl/infer/partial_infer.c +++ b/mindspore/lite/nnacl/infer/partial_infer.c @@ -15,8 +15,11 @@ */ #include "nnacl/infer/partial_infer.h" +#include "nnacl/infer/infer_register.h" int PartialInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { return NNACL_OK; } + +REG_INFER(Partial, PrimType_PartialFusion, PartialInferShape) diff --git a/mindspore/lite/nnacl/infer/pooling_grad_infer.c b/mindspore/lite/nnacl/infer/pooling_grad_infer.c index 31f759cb4e..9f80e012da 100644 --- a/mindspore/lite/nnacl/infer/pooling_grad_infer.c +++ b/mindspore/lite/nnacl/infer/pooling_grad_infer.c @@ -16,6 +16,7 @@ #include 
"nnacl/infer/pooling_grad_infer.h" #include +#include "nnacl/infer/infer_register.h" int PoolingGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -60,3 +61,6 @@ int PoolingGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tens SetShapeTensor(outputs[0], input); return NNACL_OK; } + +REG_INFER(AvgPoolGrad, PrimType_AvgPoolGrad, PoolingGradInferShape) +REG_INFER(MaxPoolGrad, PrimType_MaxPoolGrad, PoolingGradInferShape) diff --git a/mindspore/lite/nnacl/infer/pooling_infer.c b/mindspore/lite/nnacl/infer/pooling_infer.c index 44803ccabc..f6767632e3 100644 --- a/mindspore/lite/nnacl/infer/pooling_infer.c +++ b/mindspore/lite/nnacl/infer/pooling_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/pooling_infer.h" #include +#include "nnacl/infer/infer_register.h" int PoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -81,3 +82,6 @@ int PoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC SetShapeArray(output, input_shape, input_shape_size); return NNACL_OK; } + +REG_INFER(MaxPool, PrimType_MaxPoolFusion, PoolingInferShape) +REG_INFER(AvgPool, PrimType_AvgPoolFusion, PoolingInferShape) diff --git a/mindspore/lite/nnacl/infer/power_infer.c b/mindspore/lite/nnacl/infer/power_infer.c index 366cce464a..8cc73bea9e 100644 --- a/mindspore/lite/nnacl/infer/power_infer.c +++ b/mindspore/lite/nnacl/infer/power_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/power_infer.h" +#include "nnacl/infer/infer_register.h" int PowerInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -52,3 +53,5 @@ int PowerInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** SetShapeTensor(output_tensor, x_tensor); return NNACL_OK; } + +REG_INFER(Pow, PrimType_PowFusion, PowerInferShape) diff --git a/mindspore/lite/nnacl/infer/prior_box_infer.c b/mindspore/lite/nnacl/infer/prior_box_infer.c index 833722ec04..6131d5e0f1 100644 --- a/mindspore/lite/nnacl/infer/prior_box_infer.c +++ b/mindspore/lite/nnacl/infer/prior_box_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/prior_box_infer.h" #include +#include "nnacl/infer/infer_register.h" int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -75,3 +76,5 @@ int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC output->shape_[3] = kPriorBoxC; return NNACL_OK; } + +REG_INFER(PriorBox, PrimType_PriorBox, PriorBoxInferShape) diff --git a/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c b/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c index 98eddceb4d..d0caa00192 100644 --- a/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c +++ b/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/quant_dtype_cast_infer.h" +#include "nnacl/infer/infer_register.h" int QuantDtypeCastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -40,3 +41,5 @@ int QuantDtypeCastInferShape(const TensorC *const *inputs, size_t inputs_size, T SetShapeTensor(output, input); return NNACL_OK; } + +REG_INFER(QuantDTypeCast, PrimType_QuantDTypeCast, QuantDtypeCastInferShape) diff --git a/mindspore/lite/nnacl/infer/random_standard_normal_infer.c 
b/mindspore/lite/nnacl/infer/random_standard_normal_infer.c index 7488b52bb3..db5d25e2d9 100644 --- a/mindspore/lite/nnacl/infer/random_standard_normal_infer.c +++ b/mindspore/lite/nnacl/infer/random_standard_normal_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/random_standard_normal_infer.h" +#include "nnacl/infer/infer_register.h" int RandomStandardNormalInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -44,3 +45,5 @@ int RandomStandardNormalInferShape(const TensorC *const *inputs, size_t inputs_s outputs[0]->format_ = inputs[0]->format_; return NNACL_OK; } + +REG_INFER(RandomStandardNormal, PrimType_RandomStandardNormal, RandomStandardNormalInferShape) diff --git a/mindspore/lite/nnacl/infer/range_infer.c b/mindspore/lite/nnacl/infer/range_infer.c index b6cd282e8b..b596a79568 100644 --- a/mindspore/lite/nnacl/infer/range_infer.c +++ b/mindspore/lite/nnacl/infer/range_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/range_infer.h" #include +#include "nnacl/infer/infer_register.h" int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -75,3 +76,5 @@ int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** output->shape_[0] = shape_size; return NNACL_OK; } + +REG_INFER(Range, PrimType_Range, RangeInferShape) diff --git a/mindspore/lite/nnacl/infer/rank_infer.c b/mindspore/lite/nnacl/infer/rank_infer.c index 338786f47a..a0abd7f8cc 100644 --- a/mindspore/lite/nnacl/infer/rank_infer.c +++ b/mindspore/lite/nnacl/infer/rank_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/rank_infer.h" +#include "nnacl/infer/infer_register.h" int RankInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -35,3 +36,5 @@ int RankInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o output->shape_[0] = 1; return NNACL_OK; } + +REG_INFER(Rank, PrimType_Rank, RankInferShape) diff --git a/mindspore/lite/nnacl/infer/reduce_infer.c b/mindspore/lite/nnacl/infer/reduce_infer.c index d712437568..7751ce7abe 100644 --- a/mindspore/lite/nnacl/infer/reduce_infer.c +++ b/mindspore/lite/nnacl/infer/reduce_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/reduce_infer.h" +#include "nnacl/infer/infer_register.h" int ReduceOnAllAxes(const TensorC *input, TensorC *output, int *out_shape, size_t out_shape_size, bool keep_dims) { if (keep_dims) { @@ -111,3 +112,5 @@ int ReduceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * // reduce on selected axes return ReduceOnSelectedAxes(input, num_axes, actual_axes, output, out_shape, out_shape_size, keep_dims); } + +REG_INFER(Reduce, PrimType_ReduceFusion, ReduceInferShape) diff --git a/mindspore/lite/nnacl/infer/reshape_infer.c b/mindspore/lite/nnacl/infer/reshape_infer.c index 92cd72eca0..8b1498c5fd 100644 --- a/mindspore/lite/nnacl/infer/reshape_infer.c +++ b/mindspore/lite/nnacl/infer/reshape_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/reshape_infer.h" +#include "nnacl/infer/infer_register.h" void CalShape(int *data, const TensorC *const *inputs, int *out_shape, size_t *out_shape_size, int shape_size) { int input_count = GetElementNum(inputs[0]); @@ -176,3 +177,5 @@ int ReshapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC SetShapeArray(output, out_shape, out_shape_size); return NNACL_OK; } + +REG_INFER(Reshape, PrimType_Reshape, ReshapeInferShape) diff 
--git a/mindspore/lite/nnacl/infer/resize_infer.c b/mindspore/lite/nnacl/infer/resize_infer.c index 36e3c3e1e8..562c9137e9 100644 --- a/mindspore/lite/nnacl/infer/resize_infer.c +++ b/mindspore/lite/nnacl/infer/resize_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/resize_infer.h" +#include "nnacl/infer/infer_register.h" int CalculateNewHeightAndWidth(const TensorC *const *inputs, size_t inputs_size, ResizeParameter *param) { const TensorC *input = inputs[0]; @@ -138,3 +139,5 @@ int ResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * } return ret; } + +REG_INFER(Resize, PrimType_Resize, ResizeInferShape) diff --git a/mindspore/lite/nnacl/infer/rfft_infer.c b/mindspore/lite/nnacl/infer/rfft_infer.c index a1e0593027..695f8d8470 100644 --- a/mindspore/lite/nnacl/infer/rfft_infer.c +++ b/mindspore/lite/nnacl/infer/rfft_infer.c @@ -15,6 +15,8 @@ */ #include "nnacl/infer/rfft_infer.h" +#include "nnacl/infer/infer_register.h" + int RfftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { #ifdef Debug @@ -37,3 +39,5 @@ int RfftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o ShapePush(output->shape_, &(output->shape_size_), 2); return NNACL_OK; } + +REG_INFER(Rfft, PrimType_Rfft, RfftInferShape) diff --git a/mindspore/lite/nnacl/infer/roi_pooling_infer.c b/mindspore/lite/nnacl/infer/roi_pooling_infer.c index f082feb047..a4484414d3 100644 --- a/mindspore/lite/nnacl/infer/roi_pooling_infer.c +++ b/mindspore/lite/nnacl/infer/roi_pooling_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/roi_pooling_infer.h" +#include "nnacl/infer/infer_register.h" int ROIPoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -41,3 +42,5 @@ int ROIPoolingInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso output->shape_[3] = GetChannel(input); return NNACL_OK; } + +REG_INFER(ROIPooling, PrimType_ROIPooling, ROIPoolingInferShape) diff --git a/mindspore/lite/nnacl/infer/scatter_nd_infer.c b/mindspore/lite/nnacl/infer/scatter_nd_infer.c index ad034fbc05..ca980b6215 100644 --- a/mindspore/lite/nnacl/infer/scatter_nd_infer.c +++ b/mindspore/lite/nnacl/infer/scatter_nd_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/scatter_nd_infer.h" +#include "nnacl/infer/infer_register.h" int ScatterNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -40,3 +41,5 @@ int ScatterNdInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor SetShapeArray(output, shape_data, GetElementNum(shape)); return NNACL_OK; } + +REG_INFER(ScatterNd, PrimType_ScatterNd, ScatterNdInferShape) diff --git a/mindspore/lite/nnacl/infer/select_infer.c b/mindspore/lite/nnacl/infer/select_infer.c index a42c108206..8cb3183350 100644 --- a/mindspore/lite/nnacl/infer/select_infer.c +++ b/mindspore/lite/nnacl/infer/select_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/select_infer.h" #include +#include "nnacl/infer/infer_register.h" int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -54,3 +55,5 @@ int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * } return NNACL_OK; } + +REG_INFER(Select, PrimType_Select, SelectInferShape) diff --git a/mindspore/lite/nnacl/infer/sgd_infer.c b/mindspore/lite/nnacl/infer/sgd_infer.c index 
047fb1f907..1bef6d99e1 100644 --- a/mindspore/lite/nnacl/infer/sgd_infer.c +++ b/mindspore/lite/nnacl/infer/sgd_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/sgd_infer.h" +#include "nnacl/infer/infer_register.h" int SgdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -38,3 +39,5 @@ int SgdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou return NNACL_OK; } + +REG_INFER(SGD, PrimType_SGD, SgdInferShape) diff --git a/mindspore/lite/nnacl/infer/shape_infer.c b/mindspore/lite/nnacl/infer/shape_infer.c index 62dd25ae52..551d4a3def 100644 --- a/mindspore/lite/nnacl/infer/shape_infer.c +++ b/mindspore/lite/nnacl/infer/shape_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/shape_infer.h" +#include "nnacl/infer/infer_register.h" int ShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -37,3 +38,5 @@ int ShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** out_tensor->shape_[0] = (int)(in_tensor->shape_size_); return NNACL_OK; } + +REG_INFER(Shape, PrimType_Shape, ShapeInferShape) diff --git a/mindspore/lite/nnacl/infer/size_infer.c b/mindspore/lite/nnacl/infer/size_infer.c index 0b5587c58a..014a8d7397 100644 --- a/mindspore/lite/nnacl/infer/size_infer.c +++ b/mindspore/lite/nnacl/infer/size_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/size_infer.h" +#include "nnacl/infer/infer_register.h" int SizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -38,3 +39,5 @@ int SizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o return NNACL_OK; } + +REG_INFER(SizeOp, PrimType_Size, SizeInferShape) diff --git a/mindspore/lite/nnacl/infer/skip_gram_infer.c b/mindspore/lite/nnacl/infer/skip_gram_infer.c index 857632f1be..4ff4b12010 100644 --- a/mindspore/lite/nnacl/infer/skip_gram_infer.c +++ b/mindspore/lite/nnacl/infer/skip_gram_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/skip_gram_infer.h" +#include "nnacl/infer/infer_register.h" int SkipGramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -34,3 +35,5 @@ int SkipGramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC } return NNACL_OK; } + +REG_INFER(SkipGram, PrimType_SkipGram, SkipGramInferShape) diff --git a/mindspore/lite/nnacl/infer/slice_infer.c b/mindspore/lite/nnacl/infer/slice_infer.c index 390902d83f..40a552c048 100644 --- a/mindspore/lite/nnacl/infer/slice_infer.c +++ b/mindspore/lite/nnacl/infer/slice_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/slice_infer.h" +#include "nnacl/infer/infer_register.h" int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -79,3 +80,5 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** } return NNACL_OK; } + +REG_INFER(Slice, PrimType_SliceFusion, SliceInferShape) diff --git a/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c b/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c index 394625ecd0..e1173741de 100644 --- a/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c +++ b/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/softmax_cross_entropy_infer.h" +#include "nnacl/infer/infer_register.h" int 
SoftmaxCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -43,3 +44,5 @@ int SoftmaxCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_si } return NNACL_OK; } + +REG_INFER(SoftmaxCrossEntropyWithLogits, PrimType_SoftmaxCrossEntropyWithLogits, SoftmaxCrossEntropyInferShape) diff --git a/mindspore/lite/nnacl/infer/softmax_infer.c b/mindspore/lite/nnacl/infer/softmax_infer.c index 19167bf8d6..c13dc81888 100644 --- a/mindspore/lite/nnacl/infer/softmax_infer.c +++ b/mindspore/lite/nnacl/infer/softmax_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/softmax_infer.h" +#include "nnacl/infer/infer_register.h" int SoftMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -39,3 +40,5 @@ int SoftMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC SetShapeTensor(output, input); return NNACL_OK; } + +REG_INFER(Softmax, PrimType_Softmax, SoftMaxInferShape) diff --git a/mindspore/lite/nnacl/infer/space_to_batch_infer.c b/mindspore/lite/nnacl/infer/space_to_batch_infer.c index 21f48c8267..a9e5fdf2f5 100644 --- a/mindspore/lite/nnacl/infer/space_to_batch_infer.c +++ b/mindspore/lite/nnacl/infer/space_to_batch_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/space_to_batch_infer.h" +#include "nnacl/infer/infer_register.h" int SpaceToBatchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -57,3 +58,5 @@ int SpaceToBatchInferShape(const TensorC *const *inputs, size_t inputs_size, Ten outputs[0]->shape_size_ = input->shape_size_; return NNACL_OK; } + +REG_INFER(SpaceToBatch, PrimType_SpaceToBatch, SpaceToBatchInferShape) diff --git a/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c b/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c index ef89043bf2..84304424e2 100644 --- a/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c +++ b/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/space_to_batch_nd_infer.h" #include +#include "nnacl/infer/infer_register.h" int SpaceSetOutputShapeFromParam(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -133,3 +134,5 @@ int SpaceToBatchNdInferShape(const TensorC *const *inputs, size_t inputs_size, T } return NNACL_OK; } + +REG_INFER(SpaceToBatchND, PrimType_SpaceToBatchND, SpaceToBatchNdInferShape) diff --git a/mindspore/lite/nnacl/infer/space_to_depth_infer.c b/mindspore/lite/nnacl/infer/space_to_depth_infer.c index 803925c2f0..d44fe0cbfd 100644 --- a/mindspore/lite/nnacl/infer/space_to_depth_infer.c +++ b/mindspore/lite/nnacl/infer/space_to_depth_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/space_to_depth_infer.h" #include +#include "nnacl/infer/infer_register.h" int SpaceToDepthInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -57,3 +58,5 @@ int SpaceToDepthInferShape(const TensorC *const *inputs, size_t inputs_size, Ten outputs[0]->shape_size_ = input->shape_size_; return NNACL_OK; } + +REG_INFER(SpaceToDepth, PrimType_SpaceToDepth, SpaceToDepthInferShape) diff --git a/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c b/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c index 70703ce8eb..3d4b1796f6 100644 --- 
a/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c +++ b/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h" #include "nnacl/fp32_grad/softmax_grad.h" +#include "nnacl/infer/infer_register.h" int SparseSoftmaxCrossEntropyWithLogitsInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -41,3 +42,6 @@ int SparseSoftmaxCrossEntropyWithLogitsInferShape(const TensorC *const *inputs, return NNACL_OK; } + +REG_INFER(SparseSoftmaxCrossEntropyWithLogits, PrimType_SparseSoftmaxCrossEntropyWithLogits, + SparseSoftmaxCrossEntropyWithLogitsInferShape) diff --git a/mindspore/lite/nnacl/infer/sparse_to_dense_infer.c b/mindspore/lite/nnacl/infer/sparse_to_dense_infer.c index 1bb8b3092d..9c74075ca1 100644 --- a/mindspore/lite/nnacl/infer/sparse_to_dense_infer.c +++ b/mindspore/lite/nnacl/infer/sparse_to_dense_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/sparse_to_dense_infer.h" +#include "nnacl/infer/infer_register.h" int SparseToDenseInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -41,3 +42,5 @@ int SparseToDenseInferShape(const TensorC *const *inputs, size_t inputs_size, Te SetShapeArray(output, output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(SparseToDense, PrimType_SparseToDense, SparseToDenseInferShape) diff --git a/mindspore/lite/nnacl/infer/splice_infer.c b/mindspore/lite/nnacl/infer/splice_infer.c index 9fbd929850..f1fc80c79a 100644 --- a/mindspore/lite/nnacl/infer/splice_infer.c +++ b/mindspore/lite/nnacl/infer/splice_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/splice_infer.h" +#include "nnacl/infer/infer_register.h" int SpliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { diff --git a/mindspore/lite/nnacl/infer/split_infer.c b/mindspore/lite/nnacl/infer/split_infer.c index faeed864e3..aa3dd0fb00 100644 --- a/mindspore/lite/nnacl/infer/split_infer.c +++ b/mindspore/lite/nnacl/infer/split_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/split_infer.h" +#include "nnacl/infer/infer_register.h" int SplitInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -78,3 +79,5 @@ int SplitInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** } return NNACL_OK; } + +REG_INFER(Split, PrimType_Split, SplitInferShape) diff --git a/mindspore/lite/nnacl/infer/squeeze_infer.c b/mindspore/lite/nnacl/infer/squeeze_infer.c index 56edc250de..766e90d2b8 100644 --- a/mindspore/lite/nnacl/infer/squeeze_infer.c +++ b/mindspore/lite/nnacl/infer/squeeze_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/squeeze_infer.h" +#include "nnacl/infer/infer_register.h" int SqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -59,3 +60,5 @@ int SqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC SetShapeArray(outputs[0], out_shape, out_shape_size); return NNACL_OK; } + +REG_INFER(Squeeze, PrimType_Squeeze, SqueezeInferShape) diff --git a/mindspore/lite/nnacl/infer/stack_infer.c b/mindspore/lite/nnacl/infer/stack_infer.c index c135a2f8e9..3bebcd5b0e 100644 --- a/mindspore/lite/nnacl/infer/stack_infer.c +++ 
b/mindspore/lite/nnacl/infer/stack_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/stack_infer.h" +#include "nnacl/infer/infer_register.h" int StackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -55,3 +56,5 @@ int StackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** SetShapeArray(outputs[0], output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(Stack, PrimType_Stack, StackInferShape) diff --git a/mindspore/lite/nnacl/infer/strided_slice_grad_infer.c b/mindspore/lite/nnacl/infer/strided_slice_grad_infer.c index 457994a051..cd3127d7ab 100644 --- a/mindspore/lite/nnacl/infer/strided_slice_grad_infer.c +++ b/mindspore/lite/nnacl/infer/strided_slice_grad_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/strided_slice_grad_infer.h" +#include "nnacl/infer/infer_register.h" bool StridedSliceCheckInputs(const TensorC *const *inputs, size_t inputs_size) { for (size_t i = 1; i < inputs_size; ++i) { @@ -137,3 +138,5 @@ int StridedSliceGradInferShape(const TensorC *const *inputs, size_t inputs_size, SetShapeArray(outputs[0], output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(StridedSliceGrad, PrimType_StridedSliceGrad, StridedSliceGradInferShape) diff --git a/mindspore/lite/nnacl/infer/strided_slice_infer.c b/mindspore/lite/nnacl/infer/strided_slice_infer.c index a6b16b2c0e..c481d0b637 100644 --- a/mindspore/lite/nnacl/infer/strided_slice_infer.c +++ b/mindspore/lite/nnacl/infer/strided_slice_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/strided_slice_infer.h" +#include "nnacl/infer/infer_register.h" const size_t kStridedSliceOutputNum = 1; const size_t kStridedSliceInputNum = 1; @@ -377,3 +378,5 @@ int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten return NNACL_OK; } + +REG_INFER(StridedSlice, PrimType_StridedSlice, StridedSliceInferShape) diff --git a/mindspore/lite/nnacl/infer/switch_infer.c b/mindspore/lite/nnacl/infer/switch_infer.c index 8a1e94f707..84c395162f 100644 --- a/mindspore/lite/nnacl/infer/switch_infer.c +++ b/mindspore/lite/nnacl/infer/switch_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/switch_infer.h" #include +#include "nnacl/infer/infer_register.h" int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -103,3 +104,5 @@ int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * return NNACL_OK; } + +REG_INFER(Switch, PrimType_Switch, SwitchInferShape) diff --git a/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c b/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c index 69d4c7869b..d232d4aab9 100644 --- a/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/tensorlist_fromtensor_infer.h" +#include "nnacl/infer/infer_register.h" int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -70,3 +71,5 @@ int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_s free(tensor_shape.shape_size_); return NNACL_OK; } + +REG_INFER(TensorListFromTensor, PrimType_TensorListFromTensor, TensorListFromTensorInferShape) diff --git a/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c b/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c index c3a427f9f0..12c9997d84 100644 --- 
a/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/tensorlist_getitem_infer.h" +#include "nnacl/infer/infer_register.h" int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -81,3 +82,5 @@ int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size output->format_ = input0->tensors_[index].format_; return NNACL_OK; } + +REG_INFER(TensorListGetItem, PrimType_TensorListGetItem, TensorListGetItemInferShape) diff --git a/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c b/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c index 17738d31e3..d9e5db287d 100644 --- a/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/tensorlist_reserve_infer.h" +#include "nnacl/infer/infer_register.h" int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -74,3 +75,5 @@ int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size free(tmp_shape.shape_); return NNACL_OK; } + +REG_INFER(TensorListReserve, PrimType_TensorListReserve, TensorListReserveInferShape) diff --git a/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c b/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c index 3f64ef83f7..c764082b16 100644 --- a/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/tensorlist_setitem_infer.h" +#include "nnacl/infer/infer_register.h" int PreJudge(const TensorC *get_index, TensorListC *input0, const TensorC *value_tensor) { if (get_index->data_ == NULL || value_tensor->data_ == NULL) { @@ -117,3 +118,5 @@ int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size free(out_shape.shape_size_); return NNACL_OK; } + +REG_INFER(TensorListSetItem, PrimType_TensorListSetItem, TensorListSetItemInferShape) diff --git a/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c b/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c index 2dfdd31edc..361e64e576 100644 --- a/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c +++ b/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/tensorlist_stack_infer.h" +#include "nnacl/infer/infer_register.h" int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -69,3 +70,5 @@ int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size, SetShapeArray(output, output_shape, output_shape_size); return NNACL_OK; } + +REG_INFER(TensorListStack, PrimType_TensorListStack, TensorListStackInferShape) diff --git a/mindspore/lite/nnacl/infer/tile_infer.c b/mindspore/lite/nnacl/infer/tile_infer.c index 94af483302..181b57d0da 100644 --- a/mindspore/lite/nnacl/infer/tile_infer.c +++ b/mindspore/lite/nnacl/infer/tile_infer.c @@ -16,6 +16,7 @@ #include "nnacl/infer/tile_infer.h" #include +#include "nnacl/infer/infer_register.h" void TileParamCaffe2Tflite(TileParameter *param, size_t out_shape_size) { if (param->dims_size_ != 0) { @@ -110,3 +111,5 @@ int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o SetShapeArray(output, out_shape, out_shape_size); 
return NNACL_OK; } + +REG_INFER(Tile, PrimType_TileFusion, TileInferShape) diff --git a/mindspore/lite/nnacl/infer/topk_infer.c b/mindspore/lite/nnacl/infer/topk_infer.c index eea6a99dc1..4885334049 100644 --- a/mindspore/lite/nnacl/infer/topk_infer.c +++ b/mindspore/lite/nnacl/infer/topk_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/topk_infer.h" +#include "nnacl/infer/infer_register.h" int TopKInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -51,3 +52,5 @@ int TopKInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o SetShapeArray(output1, out_shape, out_shape_size); return NNACL_OK; } + +REG_INFER(TopK, PrimType_TopKFusion, TopKInferShape) diff --git a/mindspore/lite/nnacl/infer/transpose_infer.c b/mindspore/lite/nnacl/infer/transpose_infer.c index 3c3909df9c..8b52e8dade 100644 --- a/mindspore/lite/nnacl/infer/transpose_infer.c +++ b/mindspore/lite/nnacl/infer/transpose_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/transpose_infer.h" +#include "nnacl/infer/infer_register.h" bool CheckPermTransFormat(const int *perm, const int *perm_transformat, const size_t size) { for (size_t i = 0; i < size; ++i) { @@ -84,3 +85,5 @@ int TransposeInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor SetShapeArray(output, out_shape, output->shape_size_); return NNACL_OK; } + +REG_INFER(Transpose, PrimType_Transpose, TransposeInferShape) diff --git a/mindspore/lite/nnacl/infer/uniform_real_infer.c b/mindspore/lite/nnacl/infer/uniform_real_infer.c index b9a65caed4..873b6132b2 100644 --- a/mindspore/lite/nnacl/infer/uniform_real_infer.c +++ b/mindspore/lite/nnacl/infer/uniform_real_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/uniform_real_infer.h" +#include "nnacl/infer/infer_register.h" int UniformRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -35,3 +36,5 @@ int UniformRealInferShape(const TensorC *const *inputs, size_t inputs_size, Tens outputs[0]->data_type_ = kNumberTypeFloat32; return NNACL_OK; } + +REG_INFER(UniformReal, PrimType_UniformReal, UniformRealInferShape) diff --git a/mindspore/lite/nnacl/infer/unique_infer.c b/mindspore/lite/nnacl/infer/unique_infer.c index 66d9be89fb..f6e6cf428f 100644 --- a/mindspore/lite/nnacl/infer/unique_infer.c +++ b/mindspore/lite/nnacl/infer/unique_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/unique_infer.h" +#include "nnacl/infer/infer_register.h" int UniqueInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -39,3 +40,5 @@ int UniqueInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC * SetShapeTensor(output1, input); return NNACL_OK; } + +REG_INFER(Unique, PrimType_Unique, UniqueInferShape) diff --git a/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c b/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c index ea752b9bef..55f218d54a 100644 --- a/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c +++ b/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/unsorted_segment_sum_infer.h" +#include "nnacl/infer/infer_register.h" int UnsortedSegmentSumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -39,3 +40,5 @@ int UnsortedSegmentSumInferShape(const TensorC *const *inputs, size_t inputs_siz SetDataTypeFormat(out, 
x); return NNACL_OK; } + +REG_INFER(UnsortedSegmentSum, PrimType_UnsortedSegmentSum, UnsortedSegmentSumInferShape) diff --git a/mindspore/lite/nnacl/infer/unsqueeze_infer.c b/mindspore/lite/nnacl/infer/unsqueeze_infer.c index 12230b8e16..428b856e27 100644 --- a/mindspore/lite/nnacl/infer/unsqueeze_infer.c +++ b/mindspore/lite/nnacl/infer/unsqueeze_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/unsqueeze_infer.h" +#include "nnacl/infer/infer_register.h" int UnsqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -64,3 +65,5 @@ int UnsqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, Tensor SetShapeArray(output, out_shape, out_shape_size); return NNACL_OK; } + +REG_INFER(Unsqueeze, PrimType_Unsqueeze, UnsqueezeInferShape) diff --git a/mindspore/lite/nnacl/infer/unstack_infer.c b/mindspore/lite/nnacl/infer/unstack_infer.c index 0cd7795b6c..bad2cb768a 100644 --- a/mindspore/lite/nnacl/infer/unstack_infer.c +++ b/mindspore/lite/nnacl/infer/unstack_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/unstack_infer.h" +#include "nnacl/infer/infer_register.h" int UnstackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -53,3 +54,5 @@ int UnstackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC } return NNACL_OK; } + +REG_INFER(Unstack, PrimType_Unstack, UnstackInferShape) diff --git a/mindspore/lite/nnacl/infer/where_infer.c b/mindspore/lite/nnacl/infer/where_infer.c index 0393e3ff5b..5c4830178c 100644 --- a/mindspore/lite/nnacl/infer/where_infer.c +++ b/mindspore/lite/nnacl/infer/where_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/where_infer.h" +#include "nnacl/infer/infer_register.h" int WhereInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -75,3 +76,5 @@ int WhereInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** output->shape_[axisout] = nummax; return NNACL_OK; } + +REG_INFER(Where, PrimType_Where, WhereInferShape) diff --git a/mindspore/lite/nnacl/infer/while_infer.c b/mindspore/lite/nnacl/infer/while_infer.c index 1e0de40e13..b44c31a6bf 100644 --- a/mindspore/lite/nnacl/infer/while_infer.c +++ b/mindspore/lite/nnacl/infer/while_infer.c @@ -15,6 +15,7 @@ */ #include "nnacl/infer/while_infer.h" +#include "nnacl/infer/infer_register.h" int WhileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { @@ -28,3 +29,5 @@ int WhileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** return NNACL_OK; } + +REG_INFER(While, PrimType_While, WhileInferShape) diff --git a/mindspore/lite/schema/ops.fbs b/mindspore/lite/schema/ops.fbs index 8cbc03d5e3..8f99072bda 100644 --- a/mindspore/lite/schema/ops.fbs +++ b/mindspore/lite/schema/ops.fbs @@ -1087,4 +1087,3 @@ table ResizeGrad { method: ResizeMethod; align_corners: bool; } - diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index 4925ffe13d..4bc1ba1420 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -149,15 +149,20 @@ endif() if(SUPPORT_GPU STREQUAL opencl) add_subdirectory(runtime/kernel/opencl) - target_link_libraries(mindspore-lite cpu_kernel_mid opencl_kernel_mid nnacl cpu_ops_mid) - target_link_libraries(mindspore-lite_static cpu_kernel_mid opencl_kernel_mid nnacl_mid cpu_ops_mid) + 
target_link_libraries(mindspore-lite cpu_kernel_mid opencl_kernel_mid + -Wl,--whole-archive nnacl -Wl,--no-whole-archive cpu_ops_mid) + target_link_libraries(mindspore-lite_static cpu_kernel_mid opencl_kernel_mid + -Wl,--whole-archive nnacl_mid -Wl,--no-whole-archive cpu_ops_mid) elseif(SUPPORT_GPU STREQUAL cuda) add_subdirectory(runtime/kernel/cuda) - target_link_libraries(mindspore-lite cpu_kernel_mid cuda_kernel_mid nnacl cpu_ops_mid) - target_link_libraries(mindspore-lite_static cpu_kernel_mid cuda_kernel_mid nnacl_mid cpu_ops_mid) + target_link_libraries(mindspore-lite cpu_kernel_mid cuda_kernel_mid + -Wl,--whole-archive nnacl -Wl,--no-whole-archive cpu_ops_mid) + target_link_libraries(mindspore-lite_static cpu_kernel_mid cuda_kernel_mid + -Wl,--whole-archive nnacl_mid -Wl,--no-whole-archive cpu_ops_mid) else() - target_link_libraries(mindspore-lite cpu_kernel_mid nnacl cpu_ops_mid) - target_link_libraries(mindspore-lite_static cpu_kernel_mid nnacl_mid cpu_ops_mid) + target_link_libraries(mindspore-lite cpu_kernel_mid -Wl,--whole-archive nnacl -Wl,--no-whole-archive cpu_ops_mid) + target_link_libraries(mindspore-lite_static cpu_kernel_mid -Wl,--whole-archive nnacl_mid -Wl,--no-whole-archive + cpu_ops_mid) endif() if(SUPPORT_NPU) add_subdirectory(runtime/agent/npu) diff --git a/mindspore/lite/src/runtime/infer_manager.cc b/mindspore/lite/src/runtime/infer_manager.cc index 938015e542..ac97b1bcff 100644 --- a/mindspore/lite/src/runtime/infer_manager.cc +++ b/mindspore/lite/src/runtime/infer_manager.cc @@ -16,128 +16,12 @@ #include "src/runtime/infer_manager.h" #include "src/common/tensor_util.h" #include "schema/model_generated.h" -#include "nnacl/infer/common_infer.h" -#include "nnacl/infer/adam_infer.h" -#include "nnacl/infer/addn_infer.h" -#include "nnacl/infer/add_sub_grad_infer.h" -#include "nnacl/infer/apply_momentum_infer.h" -#include "nnacl/infer/argmin_max_infer.h" -#include "nnacl/infer/arithmetic_compare_infer.h" -#include "nnacl/infer/arithmetic_grad_infer.h" -#include "nnacl/infer/arithmetic_infer.h" -#include "nnacl/infer/assign_add_infer.h" -#include "nnacl/infer/assign_infer.h" -#include "nnacl/infer/audio_spectrogram_infer.h" -#include "nnacl/infer/batch_to_space_infer.h" -#include "nnacl/infer/bias_grad_infer.h" -#include "nnacl/infer/binary_cross_entropy_infer.h" -#include "nnacl/infer/bn_grad_infer.h" -#include "nnacl/infer/broadcast_to_infer.h" -#include "nnacl/infer/cast_infer.h" -#include "nnacl/infer/concat_infer.h" -#include "nnacl/infer/constant_of_shape_infer.h" -#include "nnacl/infer/conv2d_grad_filter_infer.h" -#include "nnacl/infer/conv2d_grad_input_infer.h" -#include "nnacl/infer/conv2d_infer.h" -#include "nnacl/infer/crop_infer.h" -#include "nnacl/infer/custom_extract_features_infer.h" -#include "nnacl/infer/custom_normalize_infer.h" -#include "nnacl/infer/custom_predict_infer.h" -#include "nnacl/infer/deconv2d_infer.h" -#include "nnacl/infer/dedepthwise_conv2d_infer.h" -#include "nnacl/infer/depth_to_space_infer.h" -#include "nnacl/infer/depthwise_conv2d_infer.h" -#include "nnacl/infer/detection_post_process_infer.h" -#include "nnacl/infer/dropout_grad_infer.h" -#include "nnacl/infer/embedding_lookup_infer.h" -#include "nnacl/infer/expand_dims_infer.h" -#include "nnacl/infer/fft_imag_infer.h" -#include "nnacl/infer/fft_real_infer.h" -#include "nnacl/infer/fill_infer.h" -#include "nnacl/infer/flatten_grad_infer.h" -#include "nnacl/infer/flatten_infer.h" -#include "nnacl/infer/full_connection_infer.h" -#include "nnacl/infer/fused_batchnorm_infer.h" 
-#include "nnacl/infer/gather_infer.h" -#include "nnacl/infer/gather_nd_infer.h" -#include "nnacl/infer/group_conv2d_grad_input_infer.h" -#include "nnacl/infer/hashtable_lookup_infer.h" -#include "nnacl/infer/layer_norm_infer.h" -#include "nnacl/infer/layer_norm_grad_infer.h" -#include "nnacl/infer/lsh_projection_infer.h" -#include "nnacl/infer/lstm_infer.h" -#include "nnacl/infer/matmul_infer.h" -#include "nnacl/infer/maximum_grad_infer.h" -#include "nnacl/infer/mean_infer.h" -#include "nnacl/infer/mfcc_infer.h" -#include "nnacl/infer/non_max_suppression_infer.h" -#include "nnacl/infer/one_hot_infer.h" -#include "nnacl/infer/pad_infer.h" -#include "nnacl/infer/pooling_grad_infer.h" -#include "nnacl/infer/pooling_infer.h" -#include "nnacl/infer/power_infer.h" -#include "nnacl/infer/quant_dtype_cast_infer.h" -#include "nnacl/infer/range_infer.h" -#include "nnacl/infer/rank_infer.h" -#include "nnacl/infer/reduce_infer.h" -#include "nnacl/infer/reshape_infer.h" -#include "nnacl/infer/resize_infer.h" -#include "nnacl/infer/rfft_infer.h" -#include "nnacl/infer/roi_pooling_infer.h" -#include "nnacl/infer/scatter_nd_infer.h" -#include "nnacl/infer/sgd_infer.h" -#include "nnacl/infer/shape_infer.h" -#include "nnacl/infer/skip_gram_infer.h" -#include "nnacl/infer/slice_infer.h" -#include "nnacl/infer/softmax_cross_entropy_infer.h" -#include "nnacl/infer/softmax_infer.h" -#include "nnacl/infer/space_to_batch_infer.h" -#include "nnacl/infer/space_to_batch_nd_infer.h" -#include "nnacl/infer/space_to_depth_infer.h" -#include "nnacl/infer/sparse_to_dense_infer.h" -#include "nnacl/infer/split_infer.h" -#include "nnacl/infer/squeeze_infer.h" -#include "nnacl/infer/stack_infer.h" -#include "nnacl/infer/strided_slice_infer.h" -#include "nnacl/infer/tile_infer.h" -#include "nnacl/infer/topk_infer.h" -#include "nnacl/infer/transpose_infer.h" -#include "nnacl/infer/unique_infer.h" -#include "nnacl/infer/unsorted_segment_sum_infer.h" -#include "nnacl/infer/unsqueeze_infer.h" -#include "nnacl/infer/unstack_infer.h" -#include "nnacl/infer/where_infer.h" -#include "nnacl/infer/while_infer.h" #include "include/errorcode.h" #include "nnacl/errorcode.h" - #include "src/tensorlist.h" -#include "nnacl/infer/tensorlist_reserve_infer.h" -#include "nnacl/infer/tensorlist_getitem_infer.h" -#include "nnacl/infer/tensorlist_fromtensor_infer.h" -#include "nnacl/infer/tensorlist_setitem_infer.h" -#include "nnacl/infer/tensorlist_stack_infer.h" -#include "nnacl/infer/partial_infer.h" -#include "nnacl/infer/merge_infer.h" -#include "nnacl/infer/switch_infer.h" -#include "nnacl/infer/assert_op_infer.h" -#include "nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h" -#include "nnacl/infer/dropout_infer.h" -#include "nnacl/infer/prior_box_infer.h" - -#include "nnacl/infer/gru_infer.h" -#include "nnacl/infer/select_infer.h" -#include "nnacl/infer/size_infer.h" -#include "nnacl/infer/invert_permutation_infer.h" -#include "nnacl/infer/random_standard_normal_infer.h" -#include "nnacl/infer/crop_and_resize_infer.h" -#include "nnacl/infer/strided_slice_grad_infer.h" -#include "nnacl/infer/lin_space_infer.h" -#include "nnacl/infer/uniform_real_infer.h" namespace mindspore { namespace lite { - int KernelInferShape(const std::vector &inputs, std::vector *outputs, OpParameter *parameter) { std::vector in_tensors; @@ -157,7 +41,7 @@ int KernelInferShape(const std::vector &inputs, std::vectorGetInferShapeFunc(parameter->type_); + auto infer_shape_func = GetInferFunc(parameter->type_); if (infer_shape_func == nullptr) { MS_LOG(ERROR) 
<< "Get infershape func failed! type:" << PrimitiveCurVersionTypeName(parameter->type_); return RET_ERROR; @@ -194,244 +78,5 @@ int KernelInferShape(const std::vector &inputs, std::vector &tensors_in, std::vector *outputs, OpParameter *parameter); class InferManager {