From 428f45162e50f5e31081f7383eedce1e25ec1506 Mon Sep 17 00:00:00 2001 From: zhanyuan Date: Thu, 8 Apr 2021 16:02:36 +0800 Subject: [PATCH] [CPU] Change the location of nnacl directory --- cmake/package_lite.cmake | 30 +- mindspore/ccsrc/CMakeLists.txt | 5 +- .../backend/kernel_compiler/CMakeLists.txt | 33 +- .../kernel_compiler/cpu}/nnacl/CMakeLists.txt | 0 .../kernel_compiler/cpu}/nnacl/README.md | 0 .../backend/kernel_compiler/cpu/nnacl/adder.h | 32 ++ .../cpu/nnacl/arg_min_max_parameter.h | 54 ++ .../kernel_compiler/cpu/nnacl/arithmetic.h | 46 ++ .../cpu/nnacl/arithmetic_self_parameter.h | 30 ++ .../assembly/arm32/ConvDw3x3Int8BorderPixel.S | 0 .../nnacl/assembly/arm32/ConvDwFp32Border.S | 0 .../nnacl/assembly/arm32/ConvDwFp32Center.S | 0 .../cpu}/nnacl/assembly/arm32/ConvDwFp32Row.S | 0 .../nnacl/assembly/arm32/ConvDwInt8Center.S | 0 .../assembly/arm32/ConvDwInt8PostAlign4.S | 0 .../arm32/ConvDwInt8PostAlign4PerChannel.S | 0 .../cpu}/nnacl/assembly/arm32/ConvDwInt8Row.S | 0 .../nnacl/assembly/arm32/DeconvDwFp32Center.S | 0 .../nnacl/assembly/arm32/DeconvDwInt8Center.S | 0 .../nnacl/assembly/arm32/DeconvDwInt8Post.S | 0 .../arm32/IndirectGemmInt16to32_8x4.S | 0 .../assembly/arm32/IndirectGemmInt8_2x4.S | 0 .../cpu}/nnacl/assembly/arm32/MatVecMulFp32.S | 0 .../cpu}/nnacl/assembly/arm32/MatmulFp32.S | 0 .../cpu}/nnacl/assembly/arm32/MatmulFp32Opt.S | 0 .../nnacl/assembly/arm32/MatmulFp32Opt12x4.S | 0 .../cpu}/nnacl/assembly/arm32/MatmulInt8.S | 0 .../cpu}/nnacl/assembly/arm32/MatmulInt8Opt.S | 0 .../nnacl/assembly/arm32/MatmulWinogradFp32.S | 0 .../nnacl/assembly/arm32/PostFuncBiasReluC4.S | 0 .../nnacl/assembly/arm32/PostFuncBiasReluC8.S | 0 .../assembly/arm32/PreSum4x16Int8Peroc.S | 0 .../nnacl/assembly/arm32/PreSum4x16Int8Pert.S | 0 .../nnacl/assembly/arm32/TiledC4MatmulFp32.S | 0 .../nnacl/assembly/arm32/WinogradTransLeft.S | 0 .../nnacl/assembly/arm32/WinogradTransRight.S | 0 .../cpu}/nnacl/assembly/arm64/AdderFp32.S | 0 
.../assembly/arm64/ConvDw3x3Fp32Corner.S | 0 .../assembly/arm64/ConvDw3x3Fp32Horizontal.S | 0 .../assembly/arm64/ConvDw3x3Fp32Stride1.S | 0 .../assembly/arm64/ConvDw3x3Fp32Stride2.S | 0 .../assembly/arm64/ConvDw3x3Fp32Vertical.S | 0 .../cpu}/nnacl/assembly/arm64/ConvDw3x3Int8.S | 0 .../assembly/arm64/ConvDw3x3Int8Corner.S | 0 .../assembly/arm64/ConvDw3x3Int8Horizontal.S | 0 .../assembly/arm64/ConvDw3x3Int8Stride2.S | 0 .../assembly/arm64/ConvDw3x3Int8Vertical.S | 0 .../nnacl/assembly/arm64/ConvDwFp32Border.S | 0 .../nnacl/assembly/arm64/ConvDwFp32Center.S | 0 .../assembly/arm64/ConvDwFp32Indirect3x3.S | 0 .../assembly/arm64/ConvDwFp32Indirect5x5.S | 0 .../cpu}/nnacl/assembly/arm64/ConvDwFp32Row.S | 0 .../nnacl/assembly/arm64/ConvDwInt8Center.S | 0 .../assembly/arm64/ConvDwInt8PostAlign4.S | 0 .../arm64/ConvDwInt8PostAlign4PerChannel.S | 0 .../cpu}/nnacl/assembly/arm64/ConvDwInt8Row.S | 0 .../nnacl/assembly/arm64/ConvFp32Center.S | 0 .../nnacl/assembly/arm64/DeconvDwFp32Border.S | 0 .../nnacl/assembly/arm64/DeconvDwFp32Center.S | 0 .../nnacl/assembly/arm64/DeconvDwInt8Center.S | 0 .../nnacl/assembly/arm64/DeconvDwInt8Post.S | 0 .../arm64/IndirectGemmInt16to32_8x4.S | 0 .../cpu}/nnacl/assembly/arm64/MatVecMulFp32.S | 0 .../cpu}/nnacl/assembly/arm64/MatmulFp32.S | 0 .../cpu}/nnacl/assembly/arm64/MatmulFp32Opt.S | 0 .../cpu}/nnacl/assembly/arm64/MatmulInt8.S | 0 .../cpu}/nnacl/assembly/arm64/MatmulInt8Opt.S | 0 .../cpu}/nnacl/assembly/arm64/MatmulR4Int8.S | 0 .../nnacl/assembly/arm64/MatmulWinogradFp32.S | 0 .../nnacl/assembly/arm64/PostFuncBiasReluC4.S | 0 .../nnacl/assembly/arm64/PostFuncBiasReluC8.S | 0 .../assembly/arm64/PostFuncInt8C4Neon64.S | 0 .../assembly/arm64/PreSum4x16Int8Peroc.S | 0 .../nnacl/assembly/arm64/PreSum4x16Int8Pert.S | 0 .../nnacl/assembly/arm64/TiledC4MatmulFp32.S | 0 .../nnacl/assembly/arm64/WinogradTransLeft.S | 0 .../nnacl/assembly/arm64/WinogradTransRight.S | 0 .../nnacl/assembly/avx/ConvDwFp32Avx3x3.S | 0 
.../nnacl/assembly/avx/ConvDwFp32BorderAvx.S | 0 .../nnacl/assembly/avx/ConvDwFp32RowAvx.S | 0 .../cpu}/nnacl/assembly/avx/MatmulAvx.S | 0 .../nnacl/assembly/fp16/ConvDwFp16Border.S | 0 .../nnacl/assembly/fp16/ConvDwFp16Center.S | 0 .../cpu}/nnacl/assembly/fp16/ConvDwFp16Row.S | 0 .../nnacl/assembly/fp16/DeconvDwFp16Border.S | 0 .../nnacl/assembly/fp16/DeconvDwFp16Center.S | 0 .../nnacl/assembly/fp16/Float16ToFloat32.S | 0 .../nnacl/assembly/fp16/Float32ToFloat16.S | 0 .../assembly/fp16/IndirectGemmFp16_16x8.S | 0 .../cpu}/nnacl/assembly/fp16/MatVecMulFp16.S | 0 .../cpu}/nnacl/assembly/fp16/MatmulFp16.S | 0 .../cpu}/nnacl/assembly/fp16/MatmulFp16Opt.S | 0 .../nnacl/assembly/fp16/MatmulWinogradFp16.S | 0 .../assembly/fp16/PostFuncBiasReluC4Fp16.S | 0 .../assembly/fp16/PostFuncBiasReluC8Fp16.S | 0 .../nnacl/assembly/fp16/TiledC4MatmulFp16.S | 0 .../assembly/fp16/WinogradTransLeftFp16.S | 0 .../assembly/fp16/WinogradTransRightFp16.S | 0 .../cpu}/nnacl/assembly/opt/MatmulDpInt8.S | 0 .../cpu}/nnacl/assembly/opt/MatmulDpInt8Opt.S | 0 .../cpu}/nnacl/assembly/opt/MatmulOptR4Int8.S | 0 .../cpu/nnacl/assembly_global.h | 36 ++ .../cpu}/nnacl/base/arithmetic_base.c | 0 .../cpu/nnacl/base/arithmetic_base.h | 34 ++ .../cpu}/nnacl/base/batch_to_space_base.c | 0 .../cpu/nnacl/base/batch_to_space_base.h | 33 ++ .../cpu/nnacl/base/cast_base.h | 104 ++++ .../cpu}/nnacl/base/concat_base.c | 0 .../cpu/nnacl/base/concat_base.h | 32 ++ .../cpu}/nnacl/base/conv1x1_base.c | 0 .../cpu/nnacl/base/conv1x1_base.h | 32 ++ .../cpu}/nnacl/base/depth_to_space_base.c | 0 .../cpu/nnacl/base/depth_to_space_base.h | 30 ++ .../cpu}/nnacl/base/fill_base.c | 0 .../cpu/nnacl/base/fill_base.h | 32 ++ .../cpu}/nnacl/base/gather_base.c | 0 .../cpu/nnacl/base/gather_base.h | 33 ++ .../nnacl/base/minimal_filtering_generator.c | 0 .../nnacl/base/minimal_filtering_generator.h | 63 +++ .../cpu}/nnacl/base/slice_base.c | 0 .../cpu/nnacl/base/slice_base.h | 34 ++ .../cpu}/nnacl/base/space_to_depth_base.c | 0 
.../cpu}/nnacl/base/space_to_depth_base.h | 0 .../cpu}/nnacl/base/split_base.c | 0 .../cpu/nnacl/base/split_base.h | 32 ++ .../cpu}/nnacl/base/stack_base.c | 0 .../cpu/nnacl/base/stack_base.h | 30 ++ .../cpu}/nnacl/base/tile_base.c | 0 .../cpu/nnacl/base/tile_base.h | 53 ++ .../cpu}/nnacl/base/unstack_base.c | 0 .../cpu/nnacl/base/unstack_base.h | 32 ++ .../cpu/nnacl/base/zeroslike_base.h | 34 ++ .../cpu/nnacl/batch_to_space.h | 31 ++ .../cpu/nnacl/batchnorm_parameter.h | 32 ++ .../cpu/nnacl/broadcast_to_parameter.h | 34 ++ .../cpu/nnacl/cast_parameter.h | 27 + .../kernel_compiler/cpu}/nnacl/common_func.c | 0 .../kernel_compiler/cpu/nnacl/common_func.h | 77 +++ .../cpu/nnacl/concat_parameter.h | 35 ++ .../cpu/nnacl/constant_of_shape_parameter.h | 31 ++ .../cpu/nnacl/conv_parameter.h | 131 +++++ .../cpu/nnacl/crop_parameter.h | 36 ++ .../cpu/nnacl/depth_to_space_parameter.h | 35 ++ .../nnacl/detection_post_process_parameter.h | 48 ++ .../kernel_compiler/cpu/nnacl/errorcode.h | 62 +++ .../cpu/nnacl/fill_parameter.h | 28 + .../cpu}/nnacl/fp16/activation_fp16.c | 0 .../cpu/nnacl/fp16/activation_fp16.h | 41 ++ .../cpu}/nnacl/fp16/arg_min_max_fp16.c | 0 .../cpu/nnacl/fp16/arg_min_max_fp16.h | 32 ++ .../cpu}/nnacl/fp16/arithmetic_fp16.c | 0 .../cpu/nnacl/fp16/arithmetic_fp16.h | 126 +++++ .../cpu}/nnacl/fp16/arithmetic_self_fp16.c | 0 .../cpu/nnacl/fp16/arithmetic_self_fp16.h | 59 ++ .../cpu}/nnacl/fp16/batchnorm_fp16.c | 0 .../cpu}/nnacl/fp16/batchnorm_fp16.h | 0 .../cpu/nnacl/fp16/cast_fp16.h | 65 +++ .../cpu}/nnacl/fp16/common_func_fp16.c | 0 .../cpu/nnacl/fp16/common_func_fp16.h | 41 ++ .../cpu/nnacl/fp16/constant_of_shape_fp16.h | 41 ++ .../cpu}/nnacl/fp16/conv_depthwise_fp16.c | 0 .../cpu/nnacl/fp16/conv_depthwise_fp16.h | 57 ++ .../cpu}/nnacl/fp16/conv_fp16.c | 0 .../cpu/nnacl/fp16/conv_fp16.h | 57 ++ .../cpu}/nnacl/fp16/crop_fp16.c | 0 .../cpu/nnacl/fp16/crop_fp16.h | 36 ++ .../cpu}/nnacl/fp16/deconv_fp16.c | 0 .../cpu/nnacl/fp16/deconv_fp16.h | 36 ++ 
.../cpu}/nnacl/fp16/deconv_winograd_fp16.c | 0 .../cpu/nnacl/fp16/deconv_winograd_fp16.h | 48 ++ .../cpu}/nnacl/fp16/exp_fp16.c | 0 .../kernel_compiler/cpu/nnacl/fp16/exp_fp16.h | 70 +++ .../cpu}/nnacl/fp16/gru_fp16.c | 0 .../kernel_compiler/cpu/nnacl/fp16/gru_fp16.h | 30 ++ .../cpu}/nnacl/fp16/instance_norm_fp16.c | 0 .../cpu/nnacl/fp16/instance_norm_fp16.h | 31 ++ .../cpu}/nnacl/fp16/log_softmax_fp16.c | 0 .../cpu}/nnacl/fp16/log_softmax_fp16.h | 0 .../cpu}/nnacl/fp16/lstm_fp16.c | 0 .../cpu/nnacl/fp16/lstm_fp16.h | 49 ++ .../cpu}/nnacl/fp16/matmul_fp16.c | 0 .../cpu/nnacl/fp16/matmul_fp16.h | 68 +++ .../cpu}/nnacl/fp16/matrix_fp16.c | 0 .../cpu/nnacl/fp16/matrix_fp16.h | 36 ++ .../cpu}/nnacl/fp16/pack_fp16.c | 0 .../cpu/nnacl/fp16/pack_fp16.h | 79 +++ .../cpu}/nnacl/fp16/pad_fp16.c | 0 .../kernel_compiler/cpu/nnacl/fp16/pad_fp16.h | 35 ++ .../cpu}/nnacl/fp16/pooling_fp16.c | 0 .../cpu/nnacl/fp16/pooling_fp16.h | 36 ++ .../cpu}/nnacl/fp16/power_fp16.c | 0 .../cpu/nnacl/fp16/power_fp16.h | 63 +++ .../cpu}/nnacl/fp16/quant_dtype_cast_fp16.c | 0 .../cpu/nnacl/fp16/quant_dtype_cast_fp16.h | 38 ++ .../cpu}/nnacl/fp16/reduce_fp16.c | 0 .../cpu/nnacl/fp16/reduce_fp16.h | 36 ++ .../cpu/nnacl/fp16/scale_fp16.c | 223 ++++++++ .../cpu/nnacl/fp16/scale_fp16.h | 38 ++ .../cpu}/nnacl/fp16/softmax_fp16.c | 0 .../cpu/nnacl/fp16/softmax_fp16.h | 35 ++ .../cpu}/nnacl/fp16/transpose_fp16.c | 0 .../cpu/nnacl/fp16/transpose_fp16.h | 35 ++ .../cpu}/nnacl/fp16/winograd_transform_fp16.c | 0 .../cpu/nnacl/fp16/winograd_transform_fp16.h | 63 +++ .../cpu}/nnacl/fp16/winograd_utils_fp16.c | 0 .../cpu/nnacl/fp16/winograd_utils_fp16.h | 502 ++++++++++++++++++ .../cpu}/nnacl/fp16_grad/activation_grad.c | 0 .../cpu/nnacl/fp16_grad/activation_grad.h | 42 ++ .../nnacl/fp16_grad/arithmetic_self_grad.c | 0 .../nnacl/fp16_grad/arithmetic_self_grad.h | 39 ++ .../cpu}/nnacl/fp32/activation_fp32.c | 0 .../cpu/nnacl/fp32/activation_fp32.h | 49 ++ .../cpu}/nnacl/fp32/add_fp32.c | 0 
.../kernel_compiler/cpu/nnacl/fp32/add_fp32.h | 45 ++ .../cpu}/nnacl/fp32/adder_fp32.c | 0 .../cpu/nnacl/fp32/adder_fp32.h | 47 ++ .../cpu}/nnacl/fp32/arg_min_max_fp32.c | 0 .../cpu/nnacl/fp32/arg_min_max_fp32.h | 31 ++ .../cpu}/nnacl/fp32/arithmetic_compare_fp32.c | 0 .../cpu/nnacl/fp32/arithmetic_compare_fp32.h | 50 ++ .../cpu}/nnacl/fp32/arithmetic_fp32.c | 0 .../cpu/nnacl/fp32/arithmetic_fp32.h | 71 +++ .../cpu}/nnacl/fp32/arithmetic_self_fp32.c | 0 .../cpu/nnacl/fp32/arithmetic_self_fp32.h | 62 +++ .../cpu}/nnacl/fp32/batchnorm_fp32.c | 0 .../cpu/nnacl/fp32/batchnorm_fp32.h | 37 ++ .../cpu}/nnacl/fp32/broadcast_to_fp32.c | 0 .../cpu/nnacl/fp32/broadcast_to_fp32.h | 30 ++ .../cpu}/nnacl/fp32/common_func_fp32.c | 0 .../cpu/nnacl/fp32/common_func_fp32.h | 105 ++++ .../cpu/nnacl/fp32/constant_of_shape_fp32.h | 45 ++ .../cpu}/nnacl/fp32/conv_common_fp32.c | 0 .../cpu/nnacl/fp32/conv_common_fp32.h | 40 ++ .../cpu}/nnacl/fp32/conv_depthwise_fp32.c | 0 .../cpu/nnacl/fp32/conv_depthwise_fp32.h | 91 ++++ .../cpu}/nnacl/fp32/conv_winograd_fp32.c | 0 .../cpu/nnacl/fp32/conv_winograd_fp32.h | 44 ++ .../cpu}/nnacl/fp32/crop_fp32.c | 0 .../cpu/nnacl/fp32/crop_fp32.h | 34 ++ .../cpu}/nnacl/fp32/deconv_fp32.c | 0 .../cpu/nnacl/fp32/deconv_fp32.h | 37 ++ .../cpu}/nnacl/fp32/deconv_winograd_fp32.c | 0 .../cpu/nnacl/fp32/deconv_winograd_fp32.h | 43 ++ .../nnacl/fp32/detection_post_process_fp32.c | 0 .../nnacl/fp32/detection_post_process_fp32.h | 59 ++ .../cpu}/nnacl/fp32/div_fp32.c | 0 .../kernel_compiler/cpu/nnacl/fp32/div_fp32.h | 43 ++ .../cpu}/nnacl/fp32/elu_fp32.c | 0 .../kernel_compiler/cpu/nnacl/fp32/elu_fp32.h | 39 ++ .../cpu}/nnacl/fp32/embedding_lookup_fp32.c | 0 .../cpu/nnacl/fp32/embedding_lookup_fp32.h | 43 ++ .../cpu}/nnacl/fp32/exp_fp32.c | 0 .../kernel_compiler/cpu/nnacl/fp32/exp_fp32.h | 103 ++++ .../cpu}/nnacl/fp32/gatherNd_fp32.c | 0 .../cpu/nnacl/fp32/gatherNd_fp32.h | 35 ++ .../cpu}/nnacl/fp32/gru_fp32.c | 0 .../kernel_compiler/cpu/nnacl/fp32/gru_fp32.h | 30 
++ .../cpu}/nnacl/fp32/instance_norm_fp32.c | 0 .../cpu/nnacl/fp32/instance_norm_fp32.h | 32 ++ .../cpu}/nnacl/fp32/invert_permutation_fp32.c | 0 .../cpu/nnacl/fp32/invert_permutation_fp32.h | 27 + .../cpu}/nnacl/fp32/l2_norm_fp32.c | 0 .../cpu/nnacl/fp32/l2_norm_fp32.h | 34 ++ .../cpu}/nnacl/fp32/layer_norm_fp32.c | 0 .../cpu/nnacl/fp32/layer_norm_fp32.h | 32 ++ .../nnacl/fp32/local_response_norm_fp32.c | 0 .../cpu/nnacl/fp32/local_response_norm_fp32.h | 40 ++ .../cpu}/nnacl/fp32/log_softmax_fp32.c | 0 .../cpu}/nnacl/fp32/log_softmax_fp32.h | 0 .../cpu}/nnacl/fp32/lstm_fp32.c | 0 .../cpu/nnacl/fp32/lstm_fp32.h | 43 ++ .../cpu}/nnacl/fp32/matmul_fp32.c | 0 .../cpu/nnacl/fp32/matmul_fp32.h | 83 +++ .../cpu}/nnacl/fp32/mul_fp32.c | 0 .../kernel_compiler/cpu/nnacl/fp32/mul_fp32.h | 49 ++ .../cpu}/nnacl/fp32/one_hot_fp32.c | 0 .../cpu/nnacl/fp32/one_hot_fp32.h | 47 ++ .../cpu}/nnacl/fp32/pack_fp32.c | 0 .../cpu/nnacl/fp32/pack_fp32.h | 70 +++ .../cpu}/nnacl/fp32/pad_fp32.c | 0 .../kernel_compiler/cpu/nnacl/fp32/pad_fp32.h | 41 ++ .../cpu}/nnacl/fp32/pooling_fp32.c | 0 .../cpu/nnacl/fp32/pooling_fp32.h | 38 ++ .../cpu}/nnacl/fp32/power_fp32.c | 0 .../cpu/nnacl/fp32/power_fp32.h | 53 ++ .../cpu}/nnacl/fp32/prelu_fp32.c | 0 .../cpu/nnacl/fp32/prelu_fp32.h | 32 ++ .../cpu/nnacl/fp32/prior_box_fp32.h | 38 ++ .../cpu/nnacl/fp32/range_fp32.h | 50 ++ .../cpu/nnacl/fp32/rank_fp32.h | 32 ++ .../cpu}/nnacl/fp32/reduce_fp32.c | 0 .../cpu/nnacl/fp32/reduce_fp32.h | 58 ++ .../cpu}/nnacl/fp32/resize_fp32.c | 0 .../cpu/nnacl/fp32/resize_fp32.h | 69 +++ .../cpu}/nnacl/fp32/reverse_fp32.c | 0 .../cpu/nnacl/fp32/reverse_fp32.h | 41 ++ .../cpu}/nnacl/fp32/reverse_sequence_fp32.c | 0 .../cpu/nnacl/fp32/reverse_sequence_fp32.h | 33 ++ .../cpu}/nnacl/fp32/roi_pooling_fp32.c | 0 .../cpu/nnacl/fp32/roi_pooling_fp32.h | 54 ++ .../cpu}/nnacl/fp32/scale_fp32.c | 0 .../cpu/nnacl/fp32/scale_fp32.h | 35 ++ .../cpu}/nnacl/fp32/scatter_nd_fp32.c | 0 .../cpu/nnacl/fp32/scatter_nd_fp32.h | 30 ++ 
.../cpu}/nnacl/fp32/softmax_fp32.c | 0 .../cpu/nnacl/fp32/softmax_fp32.h | 32 ++ .../cpu}/nnacl/fp32/space_to_batch_fp32.c | 0 .../cpu}/nnacl/fp32/space_to_batch_fp32.h | 0 .../cpu}/nnacl/fp32/sparse_to_dense_fp32.c | 0 .../cpu/nnacl/fp32/sparse_to_dense_fp32.h | 30 ++ .../cpu}/nnacl/fp32/splice_fp32.c | 0 .../cpu/nnacl/fp32/splice_fp32.h | 31 ++ .../cpu/nnacl/fp32/squared_difference.c | 28 + .../cpu/nnacl/fp32/squared_difference.h | 37 ++ .../cpu}/nnacl/fp32/strided_slice_fp32.c | 0 .../cpu/nnacl/fp32/strided_slice_fp32.h | 33 ++ .../cpu}/nnacl/fp32/sub_fp32.c | 0 .../kernel_compiler/cpu/nnacl/fp32/sub_fp32.h | 43 ++ .../cpu}/nnacl/fp32/topk_fp32.c | 0 .../cpu/nnacl/fp32/topk_fp32.h | 47 ++ .../cpu}/nnacl/fp32/transpose_fp32.c | 0 .../cpu/nnacl/fp32/transpose_fp32.h | 35 ++ .../cpu}/nnacl/fp32/unique_fp32.c | 0 .../cpu/nnacl/fp32/unique_fp32.h | 35 ++ .../cpu}/nnacl/fp32/where_fp32.c | 0 .../cpu/nnacl/fp32/where_fp32.h | 31 ++ .../cpu}/nnacl/fp32/winograd_transform.c | 0 .../cpu/nnacl/fp32/winograd_transform.h | 43 ++ .../cpu}/nnacl/fp32/winograd_utils.c | 0 .../cpu/nnacl/fp32/winograd_utils.h | 316 +++++++++++ .../cpu}/nnacl/fp32_grad/activation_grad.c | 0 .../cpu/nnacl/fp32_grad/activation_grad.h | 47 ++ .../cpu}/nnacl/fp32_grad/arithmetic_grad.c | 0 .../cpu/nnacl/fp32_grad/arithmetic_grad.h | 38 ++ .../cpu}/nnacl/fp32_grad/batch_norm.c | 0 .../cpu/nnacl/fp32_grad/batch_norm.h | 42 ++ .../nnacl/fp32_grad/binary_cross_entropy.c | 0 .../nnacl/fp32_grad/binary_cross_entropy.h | 36 ++ .../fp32_grad/binary_cross_entropy_grad.c | 0 .../fp32_grad/binary_cross_entropy_grad.h | 36 ++ .../nnacl/fp32_grad/convolution_grad_filter.c | 0 .../nnacl/fp32_grad/convolution_grad_filter.h | 32 ++ .../cpu}/nnacl/fp32_grad/dropout_grad.c | 0 .../cpu/nnacl/fp32_grad/dropout_grad.h | 31 ++ .../cpu/nnacl/fp32_grad/dropout_parameter.h | 27 + .../cpu}/nnacl/fp32_grad/gemm.c | 0 .../cpu/nnacl/fp32_grad/gemm.h | 45 ++ .../cpu}/nnacl/fp32_grad/layernorm_grad.c | 0 
.../cpu/nnacl/fp32_grad/layernorm_grad.h | 29 + .../nnacl/fp32_grad/layernormgrad_parameter.h | 27 + .../cpu/nnacl/fp32_grad/optimizer.h | 40 ++ .../cpu}/nnacl/fp32_grad/pack_ext.c | 0 .../cpu/nnacl/fp32_grad/pack_ext.h | 39 ++ .../cpu}/nnacl/fp32_grad/pooling_grad.c | 0 .../cpu/nnacl/fp32_grad/pooling_grad.h | 32 ++ .../cpu}/nnacl/fp32_grad/reduce_grad.c | 0 .../cpu/nnacl/fp32_grad/reduce_grad.h | 30 ++ .../cpu}/nnacl/fp32_grad/resize_grad.c | 0 .../cpu/nnacl/fp32_grad/resize_grad.h | 44 ++ .../cpu/nnacl/fp32_grad/smooth_l1_loss.h | 27 + .../cpu}/nnacl/fp32_grad/softmax_grad.c | 0 .../cpu/nnacl/fp32_grad/softmax_grad.h | 47 ++ .../cpu}/nnacl/fp32_grad/strided_slice_grad.c | 0 .../cpu/nnacl/fp32_grad/strided_slice_grad.h | 30 ++ .../nnacl/fp32_grad/unsorted_segment_sum.c | 0 .../nnacl/fp32_grad/unsorted_segment_sum.h | 29 + .../cpu/nnacl/fp32_grad/utils.h | 72 +++ .../cpu}/nnacl/gather_parameter.h | 0 .../cpu/nnacl/gelu_parameter.h | 28 + .../kernel_compiler/cpu/nnacl/gru_parameter.h | 38 ++ .../cpu}/nnacl/infer/adam_infer.c | 0 .../cpu/nnacl/infer/adam_infer.h | 31 ++ .../cpu}/nnacl/infer/add_sub_grad_infer.c | 0 .../cpu/nnacl/infer/add_sub_grad_infer.h | 31 ++ .../cpu}/nnacl/infer/addn_infer.c | 0 .../cpu/nnacl/infer/addn_infer.h | 31 ++ .../cpu}/nnacl/infer/apply_momentum_infer.c | 0 .../cpu/nnacl/infer/apply_momentum_infer.h | 31 ++ .../cpu}/nnacl/infer/argmin_max_infer.c | 0 .../cpu/nnacl/infer/argmin_max_infer.h | 32 ++ .../nnacl/infer/arithmetic_compare_infer.c | 0 .../nnacl/infer/arithmetic_compare_infer.h | 31 ++ .../cpu}/nnacl/infer/arithmetic_grad_infer.c | 0 .../cpu/nnacl/infer/arithmetic_grad_infer.h | 31 ++ .../cpu}/nnacl/infer/arithmetic_infer.c | 0 .../cpu/nnacl/infer/arithmetic_infer.h | 32 ++ .../cpu}/nnacl/infer/assert_op_infer.c | 0 .../cpu/nnacl/infer/assert_op_infer.h | 31 ++ .../cpu}/nnacl/infer/assign_add_infer.c | 0 .../cpu/nnacl/infer/assign_add_infer.h | 31 ++ .../cpu}/nnacl/infer/assign_infer.c | 0 .../cpu/nnacl/infer/assign_infer.h | 31 
++ .../nnacl/infer/audio_spectrogram_infer.c | 0 .../cpu/nnacl/infer/audio_spectrogram_infer.h | 37 ++ .../cpu}/nnacl/infer/batch_to_space_infer.c | 0 .../cpu/nnacl/infer/batch_to_space_infer.h | 32 ++ .../cpu}/nnacl/infer/bias_grad_infer.c | 0 .../cpu/nnacl/infer/bias_grad_infer.h | 31 ++ .../nnacl/infer/binary_cross_entropy_infer.c | 0 .../nnacl/infer/binary_cross_entropy_infer.h | 32 ++ .../cpu}/nnacl/infer/bn_grad_infer.c | 0 .../cpu/nnacl/infer/bn_grad_infer.h | 31 ++ .../cpu}/nnacl/infer/broadcast_to_infer.c | 0 .../cpu/nnacl/infer/broadcast_to_infer.h | 32 ++ .../cpu}/nnacl/infer/cast_infer.c | 0 .../cpu/nnacl/infer/cast_infer.h | 31 ++ .../cpu}/nnacl/infer/common_infer.c | 0 .../cpu/nnacl/infer/common_infer.h | 212 ++++++++ .../cpu}/nnacl/infer/concat_infer.c | 0 .../cpu/nnacl/infer/concat_infer.h | 32 ++ .../nnacl/infer/constant_of_shape_infer.c | 0 .../cpu/nnacl/infer/constant_of_shape_infer.h | 32 ++ .../nnacl/infer/conv2d_grad_filter_infer.c | 0 .../nnacl/infer/conv2d_grad_filter_infer.h | 32 ++ .../nnacl/infer/conv2d_grad_input_infer.c | 0 .../cpu/nnacl/infer/conv2d_grad_input_infer.h | 32 ++ .../cpu}/nnacl/infer/conv2d_infer.c | 0 .../cpu/nnacl/infer/conv2d_infer.h | 32 ++ .../cpu}/nnacl/infer/crop_and_resize_infer.c | 0 .../cpu/nnacl/infer/crop_and_resize_infer.h | 31 ++ .../cpu}/nnacl/infer/crop_infer.c | 0 .../cpu/nnacl/infer/crop_infer.h | 32 ++ .../infer/custom_extract_features_infer.c | 0 .../infer/custom_extract_features_infer.h | 31 ++ .../cpu}/nnacl/infer/custom_normalize_infer.c | 0 .../cpu/nnacl/infer/custom_normalize_infer.h | 32 ++ .../cpu}/nnacl/infer/custom_predict_infer.c | 0 .../cpu/nnacl/infer/custom_predict_infer.h | 36 ++ .../cpu}/nnacl/infer/deconv2d_infer.c | 0 .../cpu/nnacl/infer/deconv2d_infer.h | 32 ++ .../nnacl/infer/dedepthwise_conv2d_infer.c | 0 .../nnacl/infer/dedepthwise_conv2d_infer.h | 32 ++ .../cpu}/nnacl/infer/depth_to_space_infer.c | 0 .../cpu/nnacl/infer/depth_to_space_infer.h | 32 ++ 
.../cpu}/nnacl/infer/depthwise_conv2d_infer.c | 0 .../cpu/nnacl/infer/depthwise_conv2d_infer.h | 32 ++ .../infer/detection_post_process_infer.c | 0 .../infer/detection_post_process_infer.h | 32 ++ .../cpu}/nnacl/infer/dropout_grad_infer.c | 0 .../cpu/nnacl/infer/dropout_grad_infer.h | 31 ++ .../cpu}/nnacl/infer/dropout_infer.c | 0 .../cpu/nnacl/infer/dropout_infer.h | 31 ++ .../cpu}/nnacl/infer/embedding_lookup_infer.c | 0 .../cpu/nnacl/infer/embedding_lookup_infer.h | 31 ++ .../cpu}/nnacl/infer/expand_dims_infer.c | 0 .../cpu/nnacl/infer/expand_dims_infer.h | 31 ++ .../cpu}/nnacl/infer/fft_imag_infer.c | 0 .../cpu/nnacl/infer/fft_imag_infer.h | 31 ++ .../cpu}/nnacl/infer/fft_real_infer.c | 0 .../cpu/nnacl/infer/fft_real_infer.h | 31 ++ .../cpu}/nnacl/infer/fill_infer.c | 0 .../cpu/nnacl/infer/fill_infer.h | 31 ++ .../cpu}/nnacl/infer/flatten_grad_infer.c | 0 .../cpu/nnacl/infer/flatten_grad_infer.h | 31 ++ .../cpu}/nnacl/infer/flatten_infer.c | 0 .../cpu/nnacl/infer/flatten_infer.h | 31 ++ .../cpu}/nnacl/infer/full_connection_infer.c | 0 .../cpu/nnacl/infer/full_connection_infer.h | 32 ++ .../cpu}/nnacl/infer/fused_batchnorm_infer.c | 0 .../cpu/nnacl/infer/fused_batchnorm_infer.h | 31 ++ .../cpu}/nnacl/infer/gather_infer.c | 0 .../cpu/nnacl/infer/gather_infer.h | 32 ++ .../cpu}/nnacl/infer/gather_nd_infer.c | 0 .../cpu/nnacl/infer/gather_nd_infer.h | 32 ++ .../infer/group_conv2d_grad_input_infer.c | 0 .../infer/group_conv2d_grad_input_infer.h | 32 ++ .../cpu}/nnacl/infer/gru_infer.c | 0 .../cpu/nnacl/infer/gru_infer.h | 32 ++ .../cpu}/nnacl/infer/hashtable_lookup_infer.c | 0 .../cpu/nnacl/infer/hashtable_lookup_infer.h | 31 ++ .../kernel_compiler/cpu/nnacl/infer/infer.h | 33 ++ .../cpu}/nnacl/infer/infer_register.c | 0 .../cpu/nnacl/infer/infer_register.h | 233 ++++++++ .../nnacl/infer/invert_permutation_infer.c | 0 .../nnacl/infer/invert_permutation_infer.h | 31 ++ .../cpu}/nnacl/infer/layer_norm_grad_infer.c | 0 .../cpu/nnacl/infer/layer_norm_grad_infer.h | 31 
++ .../cpu}/nnacl/infer/layer_norm_infer.c | 0 .../cpu/nnacl/infer/layer_norm_infer.h | 32 ++ .../cpu}/nnacl/infer/lin_space_infer.c | 0 .../cpu/nnacl/infer/lin_space_infer.h | 31 ++ .../cpu}/nnacl/infer/log_softmax_infer.c | 0 .../cpu}/nnacl/infer/log_softmax_infer.h | 0 .../cpu}/nnacl/infer/lsh_projection_infer.c | 0 .../cpu/nnacl/infer/lsh_projection_infer.h | 32 ++ .../cpu}/nnacl/infer/lstm_infer.c | 0 .../cpu/nnacl/infer/lstm_infer.h | 32 ++ .../cpu}/nnacl/infer/matmul_infer.c | 0 .../cpu/nnacl/infer/matmul_infer.h | 32 ++ .../cpu}/nnacl/infer/max_min_grad_infer.c | 0 .../cpu/nnacl/infer/max_min_grad_infer.h | 31 ++ .../cpu}/nnacl/infer/mean_infer.c | 0 .../cpu/nnacl/infer/mean_infer.h | 32 ++ .../cpu}/nnacl/infer/merge_infer.c | 0 .../cpu/nnacl/infer/merge_infer.h | 32 ++ .../cpu}/nnacl/infer/mfcc_infer.c | 0 .../cpu/nnacl/infer/mfcc_infer.h | 36 ++ .../nnacl/infer/non_max_suppression_infer.c | 0 .../nnacl/infer/non_max_suppression_infer.h | 31 ++ .../cpu}/nnacl/infer/one_hot_infer.c | 0 .../cpu/nnacl/infer/one_hot_infer.h | 32 ++ .../cpu}/nnacl/infer/pad_infer.c | 0 .../cpu/nnacl/infer/pad_infer.h | 32 ++ .../cpu}/nnacl/infer/partial_infer.c | 0 .../cpu/nnacl/infer/partial_infer.h | 32 ++ .../cpu}/nnacl/infer/pooling_grad_infer.c | 0 .../cpu/nnacl/infer/pooling_grad_infer.h | 32 ++ .../cpu}/nnacl/infer/pooling_infer.c | 0 .../cpu/nnacl/infer/pooling_infer.h | 32 ++ .../cpu}/nnacl/infer/power_infer.c | 0 .../cpu/nnacl/infer/power_infer.h | 32 ++ .../cpu}/nnacl/infer/prior_box_infer.c | 0 .../cpu/nnacl/infer/prior_box_infer.h | 32 ++ .../cpu}/nnacl/infer/quant_dtype_cast_infer.c | 0 .../cpu/nnacl/infer/quant_dtype_cast_infer.h | 37 ++ .../infer/random_standard_normal_infer.c | 0 .../infer/random_standard_normal_infer.h | 31 ++ .../cpu}/nnacl/infer/range_infer.c | 0 .../cpu/nnacl/infer/range_infer.h | 32 ++ .../cpu}/nnacl/infer/rank_infer.c | 0 .../cpu/nnacl/infer/rank_infer.h | 31 ++ .../cpu}/nnacl/infer/reduce_infer.c | 0 .../cpu/nnacl/infer/reduce_infer.h | 
32 ++ .../cpu}/nnacl/infer/reshape_infer.c | 0 .../cpu/nnacl/infer/reshape_infer.h | 32 ++ .../cpu}/nnacl/infer/resize_infer.c | 0 .../cpu/nnacl/infer/resize_infer.h | 32 ++ .../cpu}/nnacl/infer/rfft_infer.c | 0 .../cpu/nnacl/infer/rfft_infer.h | 36 ++ .../cpu}/nnacl/infer/roi_pooling_infer.c | 0 .../cpu/nnacl/infer/roi_pooling_infer.h | 32 ++ .../cpu}/nnacl/infer/scatter_nd_infer.c | 0 .../cpu/nnacl/infer/scatter_nd_infer.h | 32 ++ .../cpu}/nnacl/infer/select_infer.c | 0 .../cpu/nnacl/infer/select_infer.h | 31 ++ .../cpu}/nnacl/infer/sgd_infer.c | 0 .../cpu/nnacl/infer/sgd_infer.h | 31 ++ .../cpu}/nnacl/infer/shape_infer.c | 0 .../cpu/nnacl/infer/shape_infer.h | 31 ++ .../cpu}/nnacl/infer/size_infer.c | 0 .../cpu/nnacl/infer/size_infer.h | 31 ++ .../cpu}/nnacl/infer/skip_gram_infer.c | 0 .../cpu/nnacl/infer/skip_gram_infer.h | 31 ++ .../cpu}/nnacl/infer/slice_infer.c | 0 .../cpu/nnacl/infer/slice_infer.h | 32 ++ .../nnacl/infer/softmax_cross_entropy_infer.c | 0 .../nnacl/infer/softmax_cross_entropy_infer.h | 31 ++ .../cpu}/nnacl/infer/softmax_infer.c | 0 .../cpu/nnacl/infer/softmax_infer.h | 32 ++ .../cpu}/nnacl/infer/space_to_batch_infer.c | 0 .../cpu/nnacl/infer/space_to_batch_infer.h | 32 ++ .../nnacl/infer/space_to_batch_nd_infer.c | 0 .../cpu/nnacl/infer/space_to_batch_nd_infer.h | 32 ++ .../cpu}/nnacl/infer/space_to_depth_infer.c | 0 .../cpu/nnacl/infer/space_to_depth_infer.h | 32 ++ ..._softmax_cross_entropy_with_logits_infer.c | 0 ..._softmax_cross_entropy_with_logits_infer.h | 31 ++ .../cpu}/nnacl/infer/sparse_to_dense_infer.c | 0 .../cpu/nnacl/infer/sparse_to_dense_infer.h | 31 ++ .../cpu}/nnacl/infer/splice_infer.c | 0 .../cpu/nnacl/infer/splice_infer.h | 32 ++ .../cpu}/nnacl/infer/split_infer.c | 0 .../cpu/nnacl/infer/split_infer.h | 32 ++ .../cpu}/nnacl/infer/squeeze_infer.c | 0 .../cpu/nnacl/infer/squeeze_infer.h | 32 ++ .../cpu}/nnacl/infer/stack_infer.c | 0 .../cpu/nnacl/infer/stack_infer.h | 32 ++ .../nnacl/infer/strided_slice_grad_infer.c | 0 
.../nnacl/infer/strided_slice_grad_infer.h | 32 ++ .../cpu}/nnacl/infer/strided_slice_infer.c | 0 .../cpu/nnacl/infer/strided_slice_infer.h | 32 ++ .../cpu}/nnacl/infer/switch_infer.c | 0 .../cpu/nnacl/infer/switch_infer.h | 32 ++ .../nnacl/infer/tensorlist_fromtensor_infer.c | 0 .../nnacl/infer/tensorlist_fromtensor_infer.h | 31 ++ .../nnacl/infer/tensorlist_getitem_infer.c | 0 .../nnacl/infer/tensorlist_getitem_infer.h | 32 ++ .../nnacl/infer/tensorlist_reserve_infer.c | 0 .../nnacl/infer/tensorlist_reserve_infer.h | 31 ++ .../nnacl/infer/tensorlist_setitem_infer.c | 0 .../nnacl/infer/tensorlist_setitem_infer.h | 31 ++ .../cpu}/nnacl/infer/tensorlist_stack_infer.c | 0 .../cpu/nnacl/infer/tensorlist_stack_infer.h | 31 ++ .../cpu}/nnacl/infer/tile_infer.c | 0 .../cpu/nnacl/infer/tile_infer.h | 32 ++ .../cpu}/nnacl/infer/topk_infer.c | 0 .../cpu/nnacl/infer/topk_infer.h | 32 ++ .../cpu}/nnacl/infer/transpose_infer.c | 0 .../cpu/nnacl/infer/transpose_infer.h | 32 ++ .../cpu}/nnacl/infer/uniform_real_infer.c | 0 .../cpu/nnacl/infer/uniform_real_infer.h | 31 ++ .../cpu}/nnacl/infer/unique_infer.c | 0 .../cpu/nnacl/infer/unique_infer.h | 31 ++ .../nnacl/infer/unsorted_segment_sum_infer.c | 0 .../nnacl/infer/unsorted_segment_sum_infer.h | 36 ++ .../cpu}/nnacl/infer/unsqueeze_infer.c | 0 .../cpu/nnacl/infer/unsqueeze_infer.h | 32 ++ .../cpu}/nnacl/infer/unstack_infer.c | 0 .../cpu/nnacl/infer/unstack_infer.h | 32 ++ .../cpu}/nnacl/infer/where_infer.c | 0 .../cpu/nnacl/infer/where_infer.h | 31 ++ .../cpu}/nnacl/infer/while_infer.c | 0 .../cpu/nnacl/infer/while_infer.h | 31 ++ .../cpu/nnacl/instance_norm_parameter.h | 32 ++ .../cpu}/nnacl/int8/add_int8.c | 0 .../kernel_compiler/cpu/nnacl/int8/add_int8.h | 73 +++ .../cpu}/nnacl/int8/arg_min_max_int8.c | 0 .../cpu/nnacl/int8/arg_min_max_int8.h | 41 ++ .../cpu}/nnacl/int8/arithmetic_int8.c | 0 .../cpu/nnacl/int8/arithmetic_int8.h | 51 ++ .../cpu}/nnacl/int8/arithmetic_self_int8.c | 0 .../cpu/nnacl/int8/arithmetic_self_int8.h | 
59 ++ .../cpu}/nnacl/int8/batch_to_space_int8.c | 0 .../cpu/nnacl/int8/batch_to_space_int8.h | 32 ++ .../cpu}/nnacl/int8/batchnorm_int8.c | 0 .../cpu/nnacl/int8/batchnorm_int8.h | 34 ++ .../cpu}/nnacl/int8/common_func_int8.c | 0 .../cpu/nnacl/int8/common_func_int8.h | 94 ++++ .../cpu}/nnacl/int8/concat_int8.c | 0 .../cpu/nnacl/int8/concat_int8.h | 32 ++ .../cpu}/nnacl/int8/conv1x1_int8.c | 0 .../cpu/nnacl/int8/conv1x1_int8.h | 45 ++ .../cpu}/nnacl/int8/conv3x3_int8.c | 0 .../cpu/nnacl/int8/conv3x3_int8.h | 48 ++ .../cpu}/nnacl/int8/conv_depthwise_int8.c | 0 .../cpu/nnacl/int8/conv_depthwise_int8.h | 49 ++ .../cpu}/nnacl/int8/conv_int8.c | 0 .../cpu/nnacl/int8/conv_int8.h | 44 ++ .../cpu}/nnacl/int8/crop_int8.c | 0 .../cpu/nnacl/int8/crop_int8.h | 34 ++ .../cpu}/nnacl/int8/deconv_int8.c | 0 .../cpu/nnacl/int8/deconv_int8.h | 46 ++ .../cpu}/nnacl/int8/depth_to_space_int8.c | 0 .../cpu/nnacl/int8/depth_to_space_int8.h | 31 ++ .../cpu}/nnacl/int8/div_int8.c | 0 .../kernel_compiler/cpu/nnacl/int8/div_int8.h | 33 ++ .../cpu}/nnacl/int8/fixed_point.c | 0 .../cpu/nnacl/int8/fixed_point.h | 74 +++ .../cpu}/nnacl/int8/gatherNd_int8.c | 0 .../cpu}/nnacl/int8/gatherNd_int8.h | 0 .../cpu}/nnacl/int8/gather_int8.c | 0 .../cpu}/nnacl/int8/gather_int8.h | 0 .../cpu}/nnacl/int8/hswish_int8.c | 0 .../cpu/nnacl/int8/hswish_int8.h | 43 ++ .../cpu}/nnacl/int8/l2_norm_int8.c | 0 .../cpu/nnacl/int8/l2_norm_int8.h | 32 ++ .../cpu}/nnacl/int8/layer_norm_int8.c | 0 .../cpu/nnacl/int8/layer_norm_int8.h | 34 ++ .../cpu}/nnacl/int8/leaky_relu_int8.c | 0 .../cpu}/nnacl/int8/leaky_relu_int8.h | 0 .../cpu}/nnacl/int8/matmul_int8.c | 0 .../cpu/nnacl/int8/matmul_int8.h | 84 +++ .../cpu}/nnacl/int8/mul_int8.c | 0 .../kernel_compiler/cpu/nnacl/int8/mul_int8.h | 38 ++ .../cpu}/nnacl/int8/pack_int8.c | 0 .../cpu/nnacl/int8/pack_int8.h | 62 +++ .../cpu}/nnacl/int8/pad_int8.c | 0 .../kernel_compiler/cpu/nnacl/int8/pad_int8.h | 35 ++ .../cpu}/nnacl/int8/pooling_int8.c | 0 .../cpu/nnacl/int8/pooling_int8.h 
| 44 ++ .../cpu}/nnacl/int8/power_int8.c | 0 .../cpu/nnacl/int8/power_int8.h | 32 ++ .../cpu}/nnacl/int8/quant_dtype_cast_int8.c | 0 .../cpu/nnacl/int8/quant_dtype_cast_int8.h | 42 ++ .../cpu}/nnacl/int8/quantize.c | 0 .../kernel_compiler/cpu/nnacl/int8/quantize.h | 219 ++++++++ .../cpu}/nnacl/int8/reduce_int8.c | 0 .../cpu/nnacl/int8/reduce_int8.h | 70 +++ .../cpu}/nnacl/int8/relux_int8.c | 0 .../cpu/nnacl/int8/relux_int8.h | 43 ++ .../cpu}/nnacl/int8/reshape_int8.c | 0 .../cpu/nnacl/int8/reshape_int8.h | 32 ++ .../cpu}/nnacl/int8/resize_int8.c | 0 .../cpu/nnacl/int8/resize_int8.h | 50 ++ .../cpu}/nnacl/int8/scale_int8.c | 0 .../cpu/nnacl/int8/scale_int8.h | 35 ++ .../cpu}/nnacl/int8/sigmoid_int8.c | 0 .../cpu}/nnacl/int8/sigmoid_int8.h | 0 .../cpu}/nnacl/int8/slice_int8.c | 0 .../cpu/nnacl/int8/slice_int8.h | 34 ++ .../cpu}/nnacl/int8/softmax_int8.c | 0 .../cpu/nnacl/int8/softmax_int8.h | 35 ++ .../cpu}/nnacl/int8/space_to_batch_int8.c | 0 .../cpu/nnacl/int8/space_to_batch_int8.h | 32 ++ .../cpu}/nnacl/int8/splice_int8.c | 0 .../cpu/nnacl/int8/splice_int8.h | 30 ++ .../cpu}/nnacl/int8/split_int8.c | 0 .../cpu/nnacl/int8/split_int8.h | 33 ++ .../cpu}/nnacl/int8/squeeze_int8.c | 0 .../cpu}/nnacl/int8/squeeze_int8.h | 0 .../cpu}/nnacl/int8/sub_int8.c | 0 .../kernel_compiler/cpu/nnacl/int8/sub_int8.h | 31 ++ .../cpu}/nnacl/int8/tanh_int8.c | 0 .../cpu/nnacl/int8/tanh_int8.h | 43 ++ .../cpu}/nnacl/int8/topk_int8.c | 0 .../cpu/nnacl/int8/topk_int8.h | 36 ++ .../cpu}/nnacl/int8/transpose_int8.c | 0 .../cpu/nnacl/int8/transpose_int8.h | 35 ++ .../cpu}/nnacl/int8/unsqueeze_int8.c | 0 .../cpu}/nnacl/int8/unsqueeze_int8.h | 0 .../cpu}/nnacl/intrinsics/avx/common_utils.c | 0 .../cpu/nnacl/intrinsics/avx/common_utils.h | 44 ++ .../nnacl/intrinsics/ms_simd_instructions.h | 229 ++++++++ .../intrinsics/sse/ConvDwFp32IndirectRow.c | 0 .../nnacl/intrinsics/sse/ConvDwFp32Row_sse.c | 0 .../nnacl/intrinsics/sse/DepthwiseFp32_Sse.c | 0 .../cpu}/nnacl/intrinsics/sse/MatMul_Sse.c | 0 
.../nnacl/intrinsics/sse/PostFuncBiasReluC4.c | 0 .../nnacl/intrinsics/sse/PostFuncBiasReluC8.c | 0 .../nnacl/intrinsics/sse/TiledC4MatMulFp32.c | 0 .../cpu}/nnacl/intrinsics/sse/WinogradTrans.c | 0 .../cpu}/nnacl/intrinsics/sse/sse_common.c | 0 .../cpu/nnacl/intrinsics/sse/sse_common.h | 56 ++ .../cpu/nnacl/l2_norm_parameter.h | 41 ++ .../cpu/nnacl/layer_norm_parameter.h | 50 ++ .../cpu/nnacl/lsh_projection_parameter.h | 35 ++ .../cpu/nnacl/lstm_parameter.h | 40 ++ .../cpu/nnacl/matmul_parameter.h | 79 +++ .../kernel_compiler/cpu/nnacl/mul_parameter.h | 40 ++ .../kernel_compiler/cpu}/nnacl/nnacl_common.c | 0 .../kernel_compiler/cpu/nnacl/nnacl_common.h | 60 +++ .../kernel_compiler/cpu}/nnacl/nnacl_utils.c | 0 .../kernel_compiler/cpu/nnacl/nnacl_utils.h | 39 ++ .../cpu/nnacl/non_max_suppression_parameter.h | 28 + .../kernel_compiler/cpu/nnacl/op_base.h | 107 ++++ .../cpu}/nnacl/optimize/CMakeLists.txt | 0 .../backend/kernel_compiler/cpu/nnacl/pack.h | 31 ++ .../kernel_compiler/cpu/nnacl/pad_parameter.h | 51 ++ .../cpu/nnacl/pooling_parameter.h | 57 ++ .../cpu/nnacl/power_parameter.h | 41 ++ .../cpu/nnacl/predict_parameter.h | 32 ++ .../cpu/nnacl/prelu_parameter.h | 32 ++ .../cpu/nnacl/prior_box_parameter.h | 40 ++ .../cpu/nnacl/random_parameter.h | 27 + .../cpu/nnacl/reduce_parameter.h | 34 ++ .../cpu/nnacl/reshape_parameter.h | 40 ++ .../cpu/nnacl/resize_parameter.h | 37 ++ .../cpu/nnacl/reverse_sequence_parameter.h | 45 ++ .../backend/kernel_compiler/cpu/nnacl/scale.h | 46 ++ .../cpu/nnacl/sigmoid_parameter.h | 41 ++ .../cpu/nnacl/skip_gram_parameter.h | 30 ++ .../cpu/nnacl/slice_parameter.h | 45 ++ .../cpu/nnacl/softmax_parameter.h | 35 ++ .../cpu}/nnacl/space_to_depth_parameter.h | 0 .../cpu/nnacl/sparse_to_dense_parameter.h | 31 ++ .../cpu/nnacl/splice_parameter.h | 29 + .../cpu/nnacl/split_parameter.h | 47 ++ .../cpu/nnacl/squeeze_parameter.h | 46 ++ .../cpu/nnacl/stack_parameter.h | 27 + .../cpu/nnacl/strided_slice_parameter.h | 43 ++ 
.../kernel_compiler/cpu/nnacl/tensor_c.h | 29 + .../cpu/nnacl/tensorlist_parameter.h | 32 ++ .../kernel_compiler/cpu/nnacl/transpose.h | 40 ++ .../cpu/nnacl/unsqueeze_parameter.h | 48 ++ .../cpu/nnacl/unstack_parameter.h | 34 ++ .../cpu/nnacl/upsample_parameter.h | 29 + .../cpu/nnacl/where_parameter.h | 35 ++ mindspore/lite/CMakeLists.txt | 2 +- mindspore/lite/micro/cmake/file_list.cmake | 212 ++++---- mindspore/lite/micro/coder/CMakeLists.txt | 1 + mindspore/lite/nnacl/adder.h | 32 -- mindspore/lite/nnacl/arg_min_max_parameter.h | 54 -- mindspore/lite/nnacl/arithmetic.h | 46 -- .../lite/nnacl/arithmetic_self_parameter.h | 30 -- mindspore/lite/nnacl/assembly_global.h | 36 -- mindspore/lite/nnacl/base/arithmetic_base.h | 34 -- .../lite/nnacl/base/batch_to_space_base.h | 33 -- mindspore/lite/nnacl/base/cast_base.h | 104 ---- mindspore/lite/nnacl/base/concat_base.h | 32 -- mindspore/lite/nnacl/base/conv1x1_base.h | 32 -- .../lite/nnacl/base/depth_to_space_base.h | 30 -- mindspore/lite/nnacl/base/fill_base.h | 32 -- mindspore/lite/nnacl/base/gather_base.h | 33 -- .../nnacl/base/minimal_filtering_generator.h | 63 --- mindspore/lite/nnacl/base/slice_base.h | 34 -- mindspore/lite/nnacl/base/split_base.h | 32 -- mindspore/lite/nnacl/base/stack_base.h | 30 -- mindspore/lite/nnacl/base/tile_base.h | 53 -- mindspore/lite/nnacl/base/unstack_base.h | 32 -- mindspore/lite/nnacl/base/zeroslike_base.h | 34 -- mindspore/lite/nnacl/batch_to_space.h | 31 -- mindspore/lite/nnacl/batchnorm_parameter.h | 32 -- mindspore/lite/nnacl/broadcast_to_parameter.h | 34 -- mindspore/lite/nnacl/cast_parameter.h | 27 - mindspore/lite/nnacl/common_func.h | 77 --- mindspore/lite/nnacl/concat_parameter.h | 35 -- .../lite/nnacl/constant_of_shape_parameter.h | 31 -- mindspore/lite/nnacl/conv_parameter.h | 131 ----- mindspore/lite/nnacl/crop_parameter.h | 36 -- .../lite/nnacl/depth_to_space_parameter.h | 35 -- .../nnacl/detection_post_process_parameter.h | 48 -- mindspore/lite/nnacl/errorcode.h | 62 --- 
mindspore/lite/nnacl/fill_parameter.h | 28 - mindspore/lite/nnacl/fp16/activation_fp16.h | 41 -- mindspore/lite/nnacl/fp16/arg_min_max_fp16.h | 32 -- mindspore/lite/nnacl/fp16/arithmetic_fp16.h | 126 ----- .../lite/nnacl/fp16/arithmetic_self_fp16.h | 59 -- mindspore/lite/nnacl/fp16/cast_fp16.h | 65 --- mindspore/lite/nnacl/fp16/common_func_fp16.h | 41 -- .../lite/nnacl/fp16/constant_of_shape_fp16.h | 41 -- .../lite/nnacl/fp16/conv_depthwise_fp16.h | 57 -- mindspore/lite/nnacl/fp16/conv_fp16.h | 57 -- mindspore/lite/nnacl/fp16/crop_fp16.h | 36 -- mindspore/lite/nnacl/fp16/deconv_fp16.h | 36 -- .../lite/nnacl/fp16/deconv_winograd_fp16.h | 48 -- mindspore/lite/nnacl/fp16/exp_fp16.h | 70 --- mindspore/lite/nnacl/fp16/gru_fp16.h | 30 -- .../lite/nnacl/fp16/instance_norm_fp16.h | 31 -- mindspore/lite/nnacl/fp16/lstm_fp16.h | 49 -- mindspore/lite/nnacl/fp16/matmul_fp16.h | 68 --- mindspore/lite/nnacl/fp16/matrix_fp16.h | 36 -- mindspore/lite/nnacl/fp16/pack_fp16.h | 79 --- mindspore/lite/nnacl/fp16/pad_fp16.h | 35 -- mindspore/lite/nnacl/fp16/pooling_fp16.h | 36 -- mindspore/lite/nnacl/fp16/power_fp16.h | 63 --- .../lite/nnacl/fp16/quant_dtype_cast_fp16.h | 38 -- mindspore/lite/nnacl/fp16/reduce_fp16.h | 36 -- mindspore/lite/nnacl/fp16/scale_fp16.c | 223 -------- mindspore/lite/nnacl/fp16/scale_fp16.h | 38 -- mindspore/lite/nnacl/fp16/softmax_fp16.h | 35 -- mindspore/lite/nnacl/fp16/transpose_fp16.h | 35 -- .../lite/nnacl/fp16/winograd_transform_fp16.h | 63 --- .../lite/nnacl/fp16/winograd_utils_fp16.h | 502 ------------------ .../lite/nnacl/fp16_grad/activation_grad.h | 42 -- .../nnacl/fp16_grad/arithmetic_self_grad.h | 39 -- mindspore/lite/nnacl/fp32/activation_fp32.h | 49 -- mindspore/lite/nnacl/fp32/add_fp32.h | 45 -- mindspore/lite/nnacl/fp32/adder_fp32.h | 47 -- mindspore/lite/nnacl/fp32/arg_min_max_fp32.h | 31 -- .../lite/nnacl/fp32/arithmetic_compare_fp32.h | 50 -- mindspore/lite/nnacl/fp32/arithmetic_fp32.h | 71 --- .../lite/nnacl/fp32/arithmetic_self_fp32.h | 62 
--- mindspore/lite/nnacl/fp32/batchnorm_fp32.h | 37 -- mindspore/lite/nnacl/fp32/broadcast_to_fp32.h | 30 -- mindspore/lite/nnacl/fp32/common_func_fp32.h | 105 ---- .../lite/nnacl/fp32/constant_of_shape_fp32.h | 45 -- mindspore/lite/nnacl/fp32/conv_common_fp32.h | 40 -- .../lite/nnacl/fp32/conv_depthwise_fp32.h | 91 ---- .../lite/nnacl/fp32/conv_winograd_fp32.h | 44 -- mindspore/lite/nnacl/fp32/crop_fp32.h | 34 -- mindspore/lite/nnacl/fp32/deconv_fp32.h | 37 -- .../lite/nnacl/fp32/deconv_winograd_fp32.h | 43 -- .../nnacl/fp32/detection_post_process_fp32.h | 59 -- mindspore/lite/nnacl/fp32/div_fp32.h | 43 -- mindspore/lite/nnacl/fp32/elu_fp32.h | 39 -- .../lite/nnacl/fp32/embedding_lookup_fp32.h | 43 -- mindspore/lite/nnacl/fp32/exp_fp32.h | 103 ---- mindspore/lite/nnacl/fp32/gatherNd_fp32.h | 35 -- mindspore/lite/nnacl/fp32/gru_fp32.h | 30 -- .../lite/nnacl/fp32/instance_norm_fp32.h | 32 -- .../lite/nnacl/fp32/invert_permutation_fp32.h | 27 - mindspore/lite/nnacl/fp32/l2_norm_fp32.h | 34 -- mindspore/lite/nnacl/fp32/layer_norm_fp32.h | 32 -- .../nnacl/fp32/local_response_norm_fp32.h | 40 -- mindspore/lite/nnacl/fp32/lstm_fp32.h | 43 -- mindspore/lite/nnacl/fp32/matmul_fp32.h | 83 --- mindspore/lite/nnacl/fp32/mul_fp32.h | 49 -- mindspore/lite/nnacl/fp32/one_hot_fp32.h | 47 -- mindspore/lite/nnacl/fp32/pack_fp32.h | 70 --- mindspore/lite/nnacl/fp32/pad_fp32.h | 41 -- mindspore/lite/nnacl/fp32/pooling_fp32.h | 38 -- mindspore/lite/nnacl/fp32/power_fp32.h | 53 -- mindspore/lite/nnacl/fp32/prelu_fp32.h | 32 -- mindspore/lite/nnacl/fp32/prior_box_fp32.h | 38 -- mindspore/lite/nnacl/fp32/range_fp32.h | 50 -- mindspore/lite/nnacl/fp32/rank_fp32.h | 32 -- mindspore/lite/nnacl/fp32/reduce_fp32.h | 58 -- mindspore/lite/nnacl/fp32/resize_fp32.h | 69 --- mindspore/lite/nnacl/fp32/reverse_fp32.h | 41 -- .../lite/nnacl/fp32/reverse_sequence_fp32.h | 33 -- mindspore/lite/nnacl/fp32/roi_pooling_fp32.h | 54 -- mindspore/lite/nnacl/fp32/scale_fp32.h | 35 -- 
mindspore/lite/nnacl/fp32/scatter_nd_fp32.h | 30 -- mindspore/lite/nnacl/fp32/softmax_fp32.h | 32 -- .../lite/nnacl/fp32/sparse_to_dense_fp32.h | 30 -- mindspore/lite/nnacl/fp32/splice_fp32.h | 31 -- .../lite/nnacl/fp32/squared_difference.c | 28 - .../lite/nnacl/fp32/squared_difference.h | 37 -- .../lite/nnacl/fp32/strided_slice_fp32.h | 33 -- mindspore/lite/nnacl/fp32/sub_fp32.h | 43 -- mindspore/lite/nnacl/fp32/topk_fp32.h | 47 -- mindspore/lite/nnacl/fp32/transpose_fp32.h | 35 -- mindspore/lite/nnacl/fp32/unique_fp32.h | 35 -- mindspore/lite/nnacl/fp32/where_fp32.h | 31 -- .../lite/nnacl/fp32/winograd_transform.h | 43 -- mindspore/lite/nnacl/fp32/winograd_utils.h | 316 ----------- .../lite/nnacl/fp32_grad/activation_grad.h | 47 -- .../lite/nnacl/fp32_grad/arithmetic_grad.h | 38 -- mindspore/lite/nnacl/fp32_grad/batch_norm.h | 42 -- .../nnacl/fp32_grad/binary_cross_entropy.h | 36 -- .../fp32_grad/binary_cross_entropy_grad.h | 36 -- .../nnacl/fp32_grad/convolution_grad_filter.h | 32 -- mindspore/lite/nnacl/fp32_grad/dropout_grad.h | 31 -- .../lite/nnacl/fp32_grad/dropout_parameter.h | 27 - mindspore/lite/nnacl/fp32_grad/gemm.h | 45 -- .../lite/nnacl/fp32_grad/layernorm_grad.h | 29 - .../nnacl/fp32_grad/layernormgrad_parameter.h | 27 - mindspore/lite/nnacl/fp32_grad/optimizer.h | 40 -- mindspore/lite/nnacl/fp32_grad/pack_ext.h | 39 -- mindspore/lite/nnacl/fp32_grad/pooling_grad.h | 32 -- mindspore/lite/nnacl/fp32_grad/reduce_grad.h | 30 -- mindspore/lite/nnacl/fp32_grad/resize_grad.h | 44 -- .../lite/nnacl/fp32_grad/smooth_l1_loss.h | 27 - mindspore/lite/nnacl/fp32_grad/softmax_grad.h | 47 -- .../lite/nnacl/fp32_grad/strided_slice_grad.h | 30 -- .../nnacl/fp32_grad/unsorted_segment_sum.h | 29 - mindspore/lite/nnacl/fp32_grad/utils.h | 72 --- mindspore/lite/nnacl/gelu_parameter.h | 28 - mindspore/lite/nnacl/gru_parameter.h | 38 -- mindspore/lite/nnacl/infer/adam_infer.h | 31 -- .../lite/nnacl/infer/add_sub_grad_infer.h | 31 -- mindspore/lite/nnacl/infer/addn_infer.h 
| 31 -- .../lite/nnacl/infer/apply_momentum_infer.h | 31 -- mindspore/lite/nnacl/infer/argmin_max_infer.h | 32 -- .../nnacl/infer/arithmetic_compare_infer.h | 31 -- .../lite/nnacl/infer/arithmetic_grad_infer.h | 31 -- mindspore/lite/nnacl/infer/arithmetic_infer.h | 32 -- mindspore/lite/nnacl/infer/assert_op_infer.h | 31 -- mindspore/lite/nnacl/infer/assign_add_infer.h | 31 -- mindspore/lite/nnacl/infer/assign_infer.h | 31 -- .../nnacl/infer/audio_spectrogram_infer.h | 37 -- .../lite/nnacl/infer/batch_to_space_infer.h | 32 -- mindspore/lite/nnacl/infer/bias_grad_infer.h | 31 -- .../nnacl/infer/binary_cross_entropy_infer.h | 32 -- mindspore/lite/nnacl/infer/bn_grad_infer.h | 31 -- .../lite/nnacl/infer/broadcast_to_infer.h | 32 -- mindspore/lite/nnacl/infer/cast_infer.h | 31 -- mindspore/lite/nnacl/infer/common_infer.h | 212 -------- mindspore/lite/nnacl/infer/concat_infer.h | 32 -- .../nnacl/infer/constant_of_shape_infer.h | 32 -- .../nnacl/infer/conv2d_grad_filter_infer.h | 32 -- .../nnacl/infer/conv2d_grad_input_infer.h | 32 -- mindspore/lite/nnacl/infer/conv2d_infer.h | 32 -- .../lite/nnacl/infer/crop_and_resize_infer.h | 31 -- mindspore/lite/nnacl/infer/crop_infer.h | 32 -- .../infer/custom_extract_features_infer.h | 31 -- .../lite/nnacl/infer/custom_normalize_infer.h | 32 -- .../lite/nnacl/infer/custom_predict_infer.h | 36 -- mindspore/lite/nnacl/infer/deconv2d_infer.h | 32 -- .../nnacl/infer/dedepthwise_conv2d_infer.h | 32 -- .../lite/nnacl/infer/depth_to_space_infer.h | 32 -- .../lite/nnacl/infer/depthwise_conv2d_infer.h | 32 -- .../infer/detection_post_process_infer.h | 32 -- .../lite/nnacl/infer/dropout_grad_infer.h | 31 -- mindspore/lite/nnacl/infer/dropout_infer.h | 31 -- .../lite/nnacl/infer/embedding_lookup_infer.h | 31 -- .../lite/nnacl/infer/expand_dims_infer.h | 31 -- mindspore/lite/nnacl/infer/fft_imag_infer.h | 31 -- mindspore/lite/nnacl/infer/fft_real_infer.h | 31 -- mindspore/lite/nnacl/infer/fill_infer.h | 31 -- 
.../lite/nnacl/infer/flatten_grad_infer.h | 31 -- mindspore/lite/nnacl/infer/flatten_infer.h | 31 -- .../lite/nnacl/infer/full_connection_infer.h | 32 -- .../lite/nnacl/infer/fused_batchnorm_infer.h | 31 -- mindspore/lite/nnacl/infer/gather_infer.h | 32 -- mindspore/lite/nnacl/infer/gather_nd_infer.h | 32 -- .../infer/group_conv2d_grad_input_infer.h | 32 -- mindspore/lite/nnacl/infer/gru_infer.h | 32 -- .../lite/nnacl/infer/hashtable_lookup_infer.h | 31 -- mindspore/lite/nnacl/infer/infer.h | 33 -- mindspore/lite/nnacl/infer/infer_register.h | 233 -------- .../nnacl/infer/invert_permutation_infer.h | 31 -- .../lite/nnacl/infer/layer_norm_grad_infer.h | 31 -- mindspore/lite/nnacl/infer/layer_norm_infer.h | 32 -- mindspore/lite/nnacl/infer/lin_space_infer.h | 31 -- .../lite/nnacl/infer/lsh_projection_infer.h | 32 -- mindspore/lite/nnacl/infer/lstm_infer.h | 32 -- mindspore/lite/nnacl/infer/matmul_infer.h | 32 -- .../lite/nnacl/infer/max_min_grad_infer.h | 31 -- mindspore/lite/nnacl/infer/mean_infer.h | 32 -- mindspore/lite/nnacl/infer/merge_infer.h | 32 -- mindspore/lite/nnacl/infer/mfcc_infer.h | 36 -- .../nnacl/infer/non_max_suppression_infer.h | 31 -- mindspore/lite/nnacl/infer/one_hot_infer.h | 32 -- mindspore/lite/nnacl/infer/pad_infer.h | 32 -- mindspore/lite/nnacl/infer/partial_infer.h | 32 -- .../lite/nnacl/infer/pooling_grad_infer.h | 32 -- mindspore/lite/nnacl/infer/pooling_infer.h | 32 -- mindspore/lite/nnacl/infer/power_infer.h | 32 -- mindspore/lite/nnacl/infer/prior_box_infer.h | 32 -- .../lite/nnacl/infer/quant_dtype_cast_infer.h | 37 -- .../infer/random_standard_normal_infer.h | 31 -- mindspore/lite/nnacl/infer/range_infer.h | 32 -- mindspore/lite/nnacl/infer/rank_infer.h | 31 -- mindspore/lite/nnacl/infer/reduce_infer.h | 32 -- mindspore/lite/nnacl/infer/reshape_infer.h | 32 -- mindspore/lite/nnacl/infer/resize_infer.h | 32 -- mindspore/lite/nnacl/infer/rfft_infer.h | 36 -- .../lite/nnacl/infer/roi_pooling_infer.h | 32 -- 
mindspore/lite/nnacl/infer/scatter_nd_infer.h | 32 -- mindspore/lite/nnacl/infer/select_infer.h | 31 -- mindspore/lite/nnacl/infer/sgd_infer.h | 31 -- mindspore/lite/nnacl/infer/shape_infer.h | 31 -- mindspore/lite/nnacl/infer/size_infer.h | 31 -- mindspore/lite/nnacl/infer/skip_gram_infer.h | 31 -- mindspore/lite/nnacl/infer/slice_infer.h | 32 -- .../nnacl/infer/softmax_cross_entropy_infer.h | 31 -- mindspore/lite/nnacl/infer/softmax_infer.h | 32 -- .../lite/nnacl/infer/space_to_batch_infer.h | 32 -- .../nnacl/infer/space_to_batch_nd_infer.h | 32 -- .../lite/nnacl/infer/space_to_depth_infer.h | 32 -- ..._softmax_cross_entropy_with_logits_infer.h | 31 -- .../lite/nnacl/infer/sparse_to_dense_infer.h | 31 -- mindspore/lite/nnacl/infer/splice_infer.h | 32 -- mindspore/lite/nnacl/infer/split_infer.h | 32 -- mindspore/lite/nnacl/infer/squeeze_infer.h | 32 -- mindspore/lite/nnacl/infer/stack_infer.h | 32 -- .../nnacl/infer/strided_slice_grad_infer.h | 32 -- .../lite/nnacl/infer/strided_slice_infer.h | 32 -- mindspore/lite/nnacl/infer/switch_infer.h | 32 -- .../nnacl/infer/tensorlist_fromtensor_infer.h | 31 -- .../nnacl/infer/tensorlist_getitem_infer.h | 32 -- .../nnacl/infer/tensorlist_reserve_infer.h | 31 -- .../nnacl/infer/tensorlist_setitem_infer.h | 31 -- .../lite/nnacl/infer/tensorlist_stack_infer.h | 31 -- mindspore/lite/nnacl/infer/tile_infer.h | 32 -- mindspore/lite/nnacl/infer/topk_infer.h | 32 -- mindspore/lite/nnacl/infer/transpose_infer.h | 32 -- .../lite/nnacl/infer/uniform_real_infer.h | 31 -- mindspore/lite/nnacl/infer/unique_infer.h | 31 -- .../nnacl/infer/unsorted_segment_sum_infer.h | 36 -- mindspore/lite/nnacl/infer/unsqueeze_infer.h | 32 -- mindspore/lite/nnacl/infer/unstack_infer.h | 32 -- mindspore/lite/nnacl/infer/where_infer.h | 31 -- mindspore/lite/nnacl/infer/while_infer.h | 31 -- .../lite/nnacl/instance_norm_parameter.h | 32 -- mindspore/lite/nnacl/int8/add_int8.h | 73 --- mindspore/lite/nnacl/int8/arg_min_max_int8.h | 41 -- 
mindspore/lite/nnacl/int8/arithmetic_int8.h | 51 -- .../lite/nnacl/int8/arithmetic_self_int8.h | 59 -- .../lite/nnacl/int8/batch_to_space_int8.h | 32 -- mindspore/lite/nnacl/int8/batchnorm_int8.h | 34 -- mindspore/lite/nnacl/int8/common_func_int8.h | 94 ---- mindspore/lite/nnacl/int8/concat_int8.h | 32 -- mindspore/lite/nnacl/int8/conv1x1_int8.h | 45 -- mindspore/lite/nnacl/int8/conv3x3_int8.h | 48 -- .../lite/nnacl/int8/conv_depthwise_int8.h | 49 -- mindspore/lite/nnacl/int8/conv_int8.h | 44 -- mindspore/lite/nnacl/int8/crop_int8.h | 34 -- mindspore/lite/nnacl/int8/deconv_int8.h | 46 -- .../lite/nnacl/int8/depth_to_space_int8.h | 31 -- mindspore/lite/nnacl/int8/div_int8.h | 33 -- mindspore/lite/nnacl/int8/fixed_point.h | 74 --- mindspore/lite/nnacl/int8/hswish_int8.h | 43 -- mindspore/lite/nnacl/int8/l2_norm_int8.h | 32 -- mindspore/lite/nnacl/int8/layer_norm_int8.h | 34 -- mindspore/lite/nnacl/int8/matmul_int8.h | 84 --- mindspore/lite/nnacl/int8/mul_int8.h | 38 -- mindspore/lite/nnacl/int8/pack_int8.h | 62 --- mindspore/lite/nnacl/int8/pad_int8.h | 35 -- mindspore/lite/nnacl/int8/pooling_int8.h | 44 -- mindspore/lite/nnacl/int8/power_int8.h | 32 -- .../lite/nnacl/int8/quant_dtype_cast_int8.h | 42 -- mindspore/lite/nnacl/int8/quantize.h | 219 -------- mindspore/lite/nnacl/int8/reduce_int8.h | 70 --- mindspore/lite/nnacl/int8/relux_int8.h | 43 -- mindspore/lite/nnacl/int8/reshape_int8.h | 32 -- mindspore/lite/nnacl/int8/resize_int8.h | 50 -- mindspore/lite/nnacl/int8/scale_int8.h | 35 -- mindspore/lite/nnacl/int8/slice_int8.h | 34 -- mindspore/lite/nnacl/int8/softmax_int8.h | 35 -- .../lite/nnacl/int8/space_to_batch_int8.h | 32 -- mindspore/lite/nnacl/int8/splice_int8.h | 30 -- mindspore/lite/nnacl/int8/split_int8.h | 33 -- mindspore/lite/nnacl/int8/sub_int8.h | 31 -- mindspore/lite/nnacl/int8/tanh_int8.h | 43 -- mindspore/lite/nnacl/int8/topk_int8.h | 36 -- mindspore/lite/nnacl/int8/transpose_int8.h | 35 -- .../lite/nnacl/intrinsics/avx/common_utils.h | 44 -- 
.../nnacl/intrinsics/ms_simd_instructions.h | 229 -------- .../lite/nnacl/intrinsics/sse/sse_common.h | 56 -- mindspore/lite/nnacl/l2_norm_parameter.h | 41 -- mindspore/lite/nnacl/layer_norm_parameter.h | 50 -- .../lite/nnacl/lsh_projection_parameter.h | 35 -- mindspore/lite/nnacl/lstm_parameter.h | 40 -- mindspore/lite/nnacl/matmul_parameter.h | 79 --- mindspore/lite/nnacl/mul_parameter.h | 40 -- mindspore/lite/nnacl/nnacl_common.h | 60 --- mindspore/lite/nnacl/nnacl_utils.h | 39 -- .../nnacl/non_max_suppression_parameter.h | 28 - mindspore/lite/nnacl/op_base.h | 107 ---- mindspore/lite/nnacl/pack.h | 31 -- mindspore/lite/nnacl/pad_parameter.h | 51 -- mindspore/lite/nnacl/pooling_parameter.h | 57 -- mindspore/lite/nnacl/power_parameter.h | 41 -- mindspore/lite/nnacl/predict_parameter.h | 32 -- mindspore/lite/nnacl/prelu_parameter.h | 32 -- mindspore/lite/nnacl/prior_box_parameter.h | 40 -- mindspore/lite/nnacl/random_parameter.h | 27 - mindspore/lite/nnacl/reduce_parameter.h | 34 -- mindspore/lite/nnacl/reshape_parameter.h | 40 -- mindspore/lite/nnacl/resize_parameter.h | 37 -- .../lite/nnacl/reverse_sequence_parameter.h | 45 -- mindspore/lite/nnacl/scale.h | 46 -- mindspore/lite/nnacl/sigmoid_parameter.h | 41 -- mindspore/lite/nnacl/skip_gram_parameter.h | 30 -- mindspore/lite/nnacl/slice_parameter.h | 45 -- mindspore/lite/nnacl/softmax_parameter.h | 35 -- .../lite/nnacl/sparse_to_dense_parameter.h | 31 -- mindspore/lite/nnacl/splice_parameter.h | 29 - mindspore/lite/nnacl/split_parameter.h | 47 -- mindspore/lite/nnacl/squeeze_parameter.h | 46 -- mindspore/lite/nnacl/stack_parameter.h | 27 - .../lite/nnacl/strided_slice_parameter.h | 43 -- mindspore/lite/nnacl/tensor_c.h | 29 - mindspore/lite/nnacl/tensorlist_parameter.h | 32 -- mindspore/lite/nnacl/transpose.h | 40 -- mindspore/lite/nnacl/unsqueeze_parameter.h | 48 -- mindspore/lite/nnacl/unstack_parameter.h | 34 -- mindspore/lite/nnacl/upsample_parameter.h | 29 - mindspore/lite/nnacl/where_parameter.h | 35 -- 
mindspore/lite/src/CMakeLists.txt | 4 +- .../src/runtime/kernel/arm/fp16/power_fp16.h | 2 +- .../runtime/kernel/arm/fp32/l2_norm_fp32.cc | 2 +- .../arm/fp32/non_max_suppression_fp32.h | 2 +- .../src/runtime/kernel/arm/fp32/power_fp32.h | 2 +- .../runtime/kernel/arm/fp32/scatter_nd_fp32.h | 2 +- .../runtime/kernel/arm/fp32/skip_gram_fp32.h | 2 +- .../kernel/arm/fp32/sparse_to_dense_fp32.cc | 4 +- .../kernel/arm/fp32/sparse_to_dense_fp32.h | 2 +- .../src/runtime/kernel/arm/fp32/where_fp32.cc | 2 +- .../src/runtime/kernel/arm/fp32/where_fp32.h | 2 +- .../runtime/kernel/arm/fp32/zeroslike_fp32.cc | 2 +- .../runtime/kernel/arm/int8/argminmax_int8.h | 2 +- .../kernel/arm/int8/depth_to_space_int8.h | 2 +- .../runtime/kernel/arm/int8/gatherNd_int8.h | 2 +- .../runtime/kernel/arm/int8/gather_int8.cc | 2 +- .../src/runtime/kernel/arm/int8/gather_int8.h | 2 +- .../src/runtime/kernel/arm/int8/hswish_int8.h | 2 +- .../src/runtime/kernel/arm/int8/matmul_int8.h | 2 +- .../src/runtime/kernel/arm/int8/power_int8.h | 2 +- .../runtime/kernel/arm/int8/reduce_int8.cc | 2 +- .../src/runtime/kernel/arm/int8/reduce_int8.h | 2 +- .../src/runtime/kernel/arm/int8/resize_int8.h | 2 +- .../runtime/kernel/arm/int8/sigmoid_int8.cc | 2 +- .../src/runtime/kernel/arm/int8/slice_int8.h | 2 +- .../runtime/kernel/arm/int8/softmax_int8.h | 2 +- .../src/runtime/kernel/arm/int8/tanh_int8.h | 2 +- .../src/runtime/kernel/opencl/kernel/fill.h | 2 +- .../src/runtime/kernel/opencl/kernel/power.h | 2 +- .../src/runtime/kernel/opencl/kernel/prelu.cc | 2 +- .../kernel/opencl/kernel/sparse_to_dense.h | 2 +- mindspore/lite/test/CMakeLists.txt | 30 +- .../test/ut/nnacl/infer/adam_infer_test.cc | 2 +- .../test/ut/nnacl/infer/addn_infer_test.cc | 2 +- .../nnacl/infer/apply_momentum_infer_test.cc | 2 +- .../test/ut/nnacl/infer/argmax_infer_test.cc | 2 +- .../test/ut/nnacl/infer/argmin_infer_test.cc | 2 +- .../infer/arithmetic_compare_infer_test.cc | 2 +- .../ut/nnacl/infer/arithmetic_infer_test.cc | 2 +- 
.../ut/nnacl/infer/assign_add_infer_test.cc | 2 +- .../test/ut/nnacl/infer/assign_infer_test.cc | 2 +- .../infer/audio_spectrogram_infer_test.cc | 2 +- .../nnacl/infer/batch_to_space_infer_test.cc | 2 +- .../ut/nnacl/infer/bias_grad_infer_test.cc | 2 +- .../infer/binary_cross_entropy_infer_test.cc | 2 +- .../test/ut/nnacl/infer/bn_grad_infer_test.cc | 2 +- .../ut/nnacl/infer/broadcast_to_infer_test.cc | 2 +- .../test/ut/nnacl/infer/cast_infer_test.cc | 2 +- .../test/ut/nnacl/infer/concat_infer_test.cc | 2 +- .../infer/constant_of_shape_infer_test.cc | 2 +- .../infer/conv2d_grad_filter_infer_test.cc | 2 +- .../infer/conv2d_grad_input_infer_test.cc | 2 +- .../test/ut/nnacl/infer/conv2d_infer_test.cc | 2 +- .../nnacl/infer/crop_and_resize_infer_test.cc | 2 +- .../test/ut/nnacl/infer/crop_infer_test.cc | 2 +- .../custom_extract_features_infer_test.cc | 2 +- .../infer/custom_normalize_infer_test.cc | 2 +- .../nnacl/infer/custom_predict_infer_test.cc | 2 +- .../ut/nnacl/infer/deconv2d_infer_test.cc | 2 +- .../infer/dedepthwise_conv2d_infer_test.cc | 2 +- .../nnacl/infer/depth_to_space_infer_test.cc | 2 +- .../infer/depthwise_conv2d_infer_test.cc | 2 +- .../detection_post_process_infer_test.cc | 2 +- .../ut/nnacl/infer/dropout_grad_infer_test.cc | 2 +- .../infer/embedding_lookup_infer_test.cc | 2 +- .../ut/nnacl/infer/expand_dims_infer_test.cc | 2 +- .../ut/nnacl/infer/fft_imag_infer_test.cc | 2 +- .../test/ut/nnacl/infer/fill_infer_test.cc | 2 +- .../ut/nnacl/infer/flatten_grad_infer_test.cc | 2 +- .../test/ut/nnacl/infer/flatten_infer_test.cc | 2 +- .../nnacl/infer/full_connection_infer_test.cc | 2 +- .../nnacl/infer/fused_batchnorm_infer_test.cc | 2 +- .../test/ut/nnacl/infer/gather_infer_test.cc | 2 +- .../ut/nnacl/infer/gather_nd_infer_test.cc | 2 +- .../group_conv2d_grad_input_infer_test.cc | 2 +- .../test/ut/nnacl/infer/gru_infer_test.cc | 2 +- .../infer/hashtable_lookup_infer_test.cc | 2 +- .../infer/invert_permutation_infer_test.cc | 2 +- 
.../ut/nnacl/infer/layer_norm_infer_test.cc | 2 +- .../nnacl/infer/lsh_projection_infer_test.cc | 2 +- .../test/ut/nnacl/infer/lstm_infer_test.cc | 2 +- .../test/ut/nnacl/infer/matmul_infer_test.cc | 2 +- .../ut/nnacl/infer/max_min_grad_infer_test.cc | 4 +- .../test/ut/nnacl/infer/mean_infer_test.cc | 2 +- .../test/ut/nnacl/infer/mfcc_infer_test.cc | 2 +- .../test/ut/nnacl/infer/one_hot_infer_test.cc | 2 +- .../test/ut/nnacl/infer/pad_infer_test.cc | 2 +- .../ut/nnacl/infer/pooling_grad_infer_test.cc | 2 +- .../test/ut/nnacl/infer/pooling_infer_test.cc | 2 +- .../test/ut/nnacl/infer/power_infer_test.cc | 2 +- .../infer/quant_dtype_cast_infer_test.cc | 2 +- .../random_standard_normal_infer_test.cc | 2 +- .../test/ut/nnacl/infer/range_infer_test.cc | 2 +- .../test/ut/nnacl/infer/rank_infer_test.cc | 2 +- .../test/ut/nnacl/infer/reduce_infer_test.cc | 2 +- .../test/ut/nnacl/infer/reshape_infer_test.cc | 2 +- .../test/ut/nnacl/infer/resize_infer_test.cc | 2 +- .../test/ut/nnacl/infer/rfft_infer_test.cc | 2 +- .../ut/nnacl/infer/roi_pooling_infer_test.cc | 2 +- .../ut/nnacl/infer/scatter_nd_infer_test.cc | 2 +- .../test/ut/nnacl/infer/select_infer_test.cc | 2 +- .../test/ut/nnacl/infer/sgd_infer_test.cc | 2 +- .../test/ut/nnacl/infer/shape_infer_test.cc | 2 +- .../test/ut/nnacl/infer/size_infer_test.cc | 2 +- .../ut/nnacl/infer/skip_gram_infer_test.cc | 2 +- .../test/ut/nnacl/infer/slice_infer_test.cc | 2 +- .../infer/softmax_cross_entropy_infer_test.cc | 2 +- .../test/ut/nnacl/infer/softmax_infer_test.cc | 2 +- .../nnacl/infer/space_to_batch_infer_test.cc | 2 +- .../infer/space_to_batch_nd_infer_test.cc | 2 +- .../nnacl/infer/space_to_depth_infer_test.cc | 2 +- .../nnacl/infer/sparse_to_dense_infer_test.cc | 2 +- .../test/ut/nnacl/infer/split_infer_test.cc | 2 +- .../test/ut/nnacl/infer/squeeze_infer_test.cc | 2 +- .../test/ut/nnacl/infer/stack_infer_test.cc | 2 +- .../nnacl/infer/strided_slice_infer_test.cc | 2 +- .../infer/tensorlist_fromtensor_infer_test.cc | 2 +- 
.../infer/tensorlist_getitem_infer_test.cc | 2 +- .../infer/tensorlist_reserve_infer_test.cc | 2 +- .../infer/tensorlist_setitem_infer_test.cc | 2 +- .../infer/tensorlist_stack_infer_test.cc | 2 +- .../test/ut/nnacl/infer/tile_infer_test.cc | 2 +- .../test/ut/nnacl/infer/topk_infer_test.cc | 2 +- .../ut/nnacl/infer/transpose_infer_test.cc | 2 +- .../test/ut/nnacl/infer/unique_infer_test.cc | 2 +- .../infer/unsorted_segment_sum_infer_test.cc | 2 +- .../ut/nnacl/infer/unsqueeze_infer_test.cc | 2 +- .../test/ut/nnacl/infer/unstack_infer_test.cc | 2 +- .../test/ut/nnacl/infer/where_infer_test.cc | 2 +- .../test/ut/nnacl/infer/while_infer_test.cc | 2 +- .../runtime/kernel/arm/common/pack_tests.cc | 4 +- .../kernel/arm/fp32/activation_fp32_test.cc | 2 +- .../arm/fp32/batch_to_space_fp32_test.cc | 6 +- .../kernel/arm/fp32/batchnorm_fp32_tests.cc | 2 +- .../runtime/kernel/arm/fp32/crop_fp32_test.cc | 2 +- .../arm/fp32/deconvolution_fp32_tests.cc | 4 +- .../arm/fp32/depth_to_space_fp32_test.cc | 4 +- .../arm/fp32/lsh_projection_fp32_tests.cc | 2 +- .../kernel/arm/fp32/lstm_fp32_tests.cc | 2 +- .../kernel/arm/fp32/matmul_fp32_tests.cc | 2 +- .../kernel/arm/fp32/reduce_fp32_tests.cc | 2 +- .../arm/fp32/reverse_sequence_fp32_tests.cc | 2 +- .../runtime/kernel/arm/fp32/skip_gram_fp32.cc | 2 +- .../arm/fp32/space_to_batch_fp32_tests.cc | 2 +- .../arm/fp32/space_to_depth_fp32_tests.cc | 4 +- .../arm/fp32/sparse_to_dense_fp32_tests.cc | 2 +- .../kernel/arm/fp32/stack_fp32_test.cc | 2 +- .../arm/fp32/strided_slice_fp32_tests.cc | 2 +- .../kernel/arm/fp32/tile_fp32_tests.cc | 2 +- .../kernel/arm/fp32/topk_fp32_tests.cc | 2 +- .../kernel/arm/fp32/transpose_fp32_tests.cc | 2 +- .../kernel/arm/fp32/uniform_real_fp32_test.cc | 2 +- .../kernel/arm/fp32/unique_fp32_tests.cc | 2 +- .../kernel/arm/fp32/unstack_fp32_tests.cc | 2 +- .../fp32_grad/convolution_grad_fp32_tests.cc | 2 +- .../deconvolution_grad_fp32_tests.cc | 2 +- .../arm/fp32_grad/softmax_grad_fp32_tests.cc | 2 +- 
.../arm/int8/arithmetic_self_int8_tests.cc | 2 +- .../kernel/arm/int8/batchnorm_int8_test.cc | 4 +- .../kernel/arm/int8/concat_int8_tests.cc | 2 +- .../kernel/arm/int8/conv_1x1_int8_tests.cc | 2 +- .../kernel/arm/int8/crop_int8_tests.cc | 2 +- .../kernel/arm/int8/deconv_int8_tests.cc | 6 +- .../arm/int8/fullconnection_int8_tests.cc | 4 +- .../kernel/arm/int8/gatherNd_int8_test.cc | 4 +- .../kernel/arm/int8/gather_int8_test.cc | 4 +- .../kernel/arm/int8/hswish_int8_tests.cc | 2 +- .../kernel/arm/int8/matmul_int8_tests.cc | 2 +- .../runtime/kernel/arm/int8/mul_int8_tests.cc | 2 +- .../kernel/arm/int8/power_int8_tests.cc | 2 +- .../kernel/arm/int8/prelu_int8_tests.cc | 2 +- .../kernel/arm/int8/quant_dtype_cast_tests.cc | 2 +- .../kernel/arm/int8/reshape_int8_tests.cc | 2 +- .../kernel/arm/int8/sigmoid_int8_tests.cc | 2 +- .../kernel/arm/int8/softmax_int8_tests.cc | 2 +- .../kernel/arm/int8/split_int8_tests.cc | 2 +- .../kernel/arm/int8/squeeze_int8_tests.cc | 2 +- .../kernel/arm/int8/topk_int8_tests.cc | 2 +- .../kernel/arm/int8/unsqueeze_int8_tests.cc | 2 +- .../runtime/kernel/arm/string/normalize.cc | 2 +- .../src/runtime/kernel/opencl/prelu_tests.cc | 2 +- .../kernel/opencl/space_to_depth_tests.cc | 4 +- mindspore/lite/tools/converter/CMakeLists.txt | 19 +- .../tools/cropper/build_cropper_config.sh | 17 +- 1295 files changed, 15957 insertions(+), 15928 deletions(-) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/CMakeLists.txt (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/README.md (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/adder.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arg_min_max_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arithmetic.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arithmetic_self_parameter.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/ConvDw3x3Int8BorderPixel.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/ConvDwFp32Border.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/ConvDwFp32Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/ConvDwFp32Row.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/ConvDwInt8Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/ConvDwInt8PostAlign4.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/ConvDwInt8PostAlign4PerChannel.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/ConvDwInt8Row.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/DeconvDwFp32Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/DeconvDwInt8Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/DeconvDwInt8Post.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/IndirectGemmInt16to32_8x4.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/IndirectGemmInt8_2x4.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/MatVecMulFp32.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/MatmulFp32.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/MatmulFp32Opt.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/MatmulFp32Opt12x4.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/MatmulInt8.S (100%) rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/MatmulInt8Opt.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/MatmulWinogradFp32.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/PostFuncBiasReluC4.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/PostFuncBiasReluC8.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/PreSum4x16Int8Peroc.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/PreSum4x16Int8Pert.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/TiledC4MatmulFp32.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/WinogradTransLeft.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm32/WinogradTransRight.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/AdderFp32.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Fp32Corner.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Fp32Horizontal.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Fp32Stride1.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Fp32Stride2.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Fp32Vertical.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Int8.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Int8Corner.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Int8Horizontal.S (100%) rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Int8Stride2.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDw3x3Int8Vertical.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDwFp32Border.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDwFp32Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDwFp32Indirect3x3.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDwFp32Indirect5x5.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDwFp32Row.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDwInt8Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDwInt8PostAlign4.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDwInt8PostAlign4PerChannel.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvDwInt8Row.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/ConvFp32Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/DeconvDwFp32Border.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/DeconvDwFp32Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/DeconvDwInt8Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/DeconvDwInt8Post.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/IndirectGemmInt16to32_8x4.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/MatVecMulFp32.S (100%) rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/MatmulFp32.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/MatmulFp32Opt.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/MatmulInt8.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/MatmulInt8Opt.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/MatmulR4Int8.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/MatmulWinogradFp32.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/PostFuncBiasReluC4.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/PostFuncBiasReluC8.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/PostFuncInt8C4Neon64.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/PreSum4x16Int8Peroc.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/PreSum4x16Int8Pert.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/TiledC4MatmulFp32.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/WinogradTransLeft.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/arm64/WinogradTransRight.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/avx/ConvDwFp32Avx3x3.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/avx/ConvDwFp32BorderAvx.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/avx/ConvDwFp32RowAvx.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/avx/MatmulAvx.S (100%) rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/ConvDwFp16Border.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/ConvDwFp16Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/ConvDwFp16Row.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/DeconvDwFp16Border.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/DeconvDwFp16Center.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/Float16ToFloat32.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/Float32ToFloat16.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/IndirectGemmFp16_16x8.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/MatVecMulFp16.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/MatmulFp16.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/MatmulFp16Opt.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/MatmulWinogradFp16.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/PostFuncBiasReluC4Fp16.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/PostFuncBiasReluC8Fp16.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/TiledC4MatmulFp16.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/WinogradTransLeftFp16.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/fp16/WinogradTransRightFp16.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/opt/MatmulDpInt8.S (100%) rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/opt/MatmulDpInt8Opt.S (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/assembly/opt/MatmulOptR4Int8.S (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly_global.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/arithmetic_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/arithmetic_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/batch_to_space_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/batch_to_space_base.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/cast_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/concat_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/concat_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/conv1x1_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/conv1x1_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/depth_to_space_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/depth_to_space_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/fill_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/fill_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/gather_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/gather_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/minimal_filtering_generator.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/minimal_filtering_generator.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/slice_base.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/slice_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/space_to_depth_base.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/space_to_depth_base.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/split_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/split_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/stack_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/stack_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/tile_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/tile_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/base/unstack_base.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/unstack_base.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/zeroslike_base.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/batch_to_space.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/batchnorm_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/broadcast_to_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/cast_parameter.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/common_func.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/common_func.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/concat_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/constant_of_shape_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/conv_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/crop_parameter.h create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/depth_to_space_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/detection_post_process_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/errorcode.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fill_parameter.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/activation_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/activation_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/arg_min_max_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arg_min_max_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/arithmetic_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/arithmetic_self_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_self_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/batchnorm_fp16.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/batchnorm_fp16.h (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/cast_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/common_func_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/common_func_fp16.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/constant_of_shape_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/conv_depthwise_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/conv_fp16.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/crop_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/crop_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/deconv_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/deconv_winograd_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_winograd_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/exp_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/exp_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/gru_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/gru_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/instance_norm_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/instance_norm_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/log_softmax_fp16.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/log_softmax_fp16.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/lstm_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/lstm_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/matmul_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matmul_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/matrix_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matrix_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/pack_fp16.c (100%) 
create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pack_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/pad_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pad_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/pooling_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pooling_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/power_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/power_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/quant_dtype_cast_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/quant_dtype_cast_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/reduce_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/reduce_fp16.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/scale_fp16.c create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/scale_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/softmax_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/softmax_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/transpose_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/transpose_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/winograd_transform_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_transform_fp16.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16/winograd_utils_fp16.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_utils_fp16.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16_grad/activation_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/activation_grad.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp16_grad/arithmetic_self_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/arithmetic_self_grad.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/activation_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/activation_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/add_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/add_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/adder_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/adder_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/arg_min_max_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arg_min_max_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/arithmetic_compare_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_compare_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/arithmetic_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/arithmetic_self_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_self_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/batchnorm_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/batchnorm_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/broadcast_to_fp32.c (100%) 
create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/broadcast_to_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/common_func_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/common_func_fp32.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/constant_of_shape_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/conv_common_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_common_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/conv_depthwise_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/conv_winograd_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_winograd_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/crop_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/crop_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/deconv_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/deconv_winograd_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_winograd_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/detection_post_process_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/detection_post_process_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/div_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/div_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/elu_fp32.c 
(100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/elu_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/embedding_lookup_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/embedding_lookup_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/exp_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/gatherNd_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gatherNd_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/gru_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gru_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/instance_norm_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/instance_norm_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/invert_permutation_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/invert_permutation_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/l2_norm_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/l2_norm_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/layer_norm_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/layer_norm_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/local_response_norm_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/local_response_norm_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/log_softmax_fp32.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/log_softmax_fp32.h 
(100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/lstm_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/lstm_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/matmul_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/matmul_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/mul_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/mul_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/one_hot_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/one_hot_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/pack_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pack_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/pad_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pad_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/pooling_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pooling_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/power_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/power_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/prelu_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prelu_fp32.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prior_box_fp32.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/range_fp32.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/rank_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/reduce_fp32.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reduce_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/resize_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/resize_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/reverse_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/reverse_sequence_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_sequence_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/roi_pooling_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/roi_pooling_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/scale_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scale_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/scatter_nd_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scatter_nd_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/softmax_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/softmax_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/space_to_batch_fp32.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/space_to_batch_fp32.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/sparse_to_dense_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sparse_to_dense_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/splice_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/splice_fp32.h create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/squared_difference.c create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/squared_difference.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/strided_slice_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/strided_slice_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/sub_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sub_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/topk_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/topk_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/transpose_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/transpose_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/unique_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/unique_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/where_fp32.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/where_fp32.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/winograd_transform.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_transform.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32/winograd_utils.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_utils.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/activation_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/activation_grad.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/arithmetic_grad.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/arithmetic_grad.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/batch_norm.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/batch_norm.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/binary_cross_entropy.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/binary_cross_entropy_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy_grad.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/convolution_grad_filter.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/convolution_grad_filter.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/dropout_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_grad.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_parameter.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/gemm.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/gemm.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/layernorm_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernorm_grad.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernormgrad_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/optimizer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/pack_ext.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pack_ext.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/pooling_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pooling_grad.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/reduce_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/reduce_grad.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/resize_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/smooth_l1_loss.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/softmax_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/softmax_grad.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/strided_slice_grad.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/strided_slice_grad.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/fp32_grad/unsorted_segment_sum.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/unsorted_segment_sum.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/utils.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/gather_parameter.h (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gelu_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gru_parameter.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/adam_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/adam_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/add_sub_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/add_sub_grad_infer.h rename 
mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/addn_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/addn_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/apply_momentum_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/apply_momentum_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/argmin_max_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/argmin_max_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/arithmetic_compare_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_compare_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/arithmetic_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/arithmetic_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/assert_op_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assert_op_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/assign_add_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_add_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/assign_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/audio_spectrogram_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/audio_spectrogram_infer.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/batch_to_space_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/batch_to_space_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/bias_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_grad_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/binary_cross_entropy_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/binary_cross_entropy_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/bn_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bn_grad_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/broadcast_to_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/broadcast_to_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/cast_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/cast_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/common_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/concat_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/concat_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/constant_of_shape_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/constant_of_shape_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/conv2d_grad_filter_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/conv2d_grad_input_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/conv2d_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/crop_and_resize_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_and_resize_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/crop_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/custom_extract_features_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_extract_features_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/custom_normalize_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_normalize_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/custom_predict_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_predict_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/deconv2d_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/deconv2d_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/dedepthwise_conv2d_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dedepthwise_conv2d_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/depth_to_space_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depth_to_space_infer.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/depthwise_conv2d_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depthwise_conv2d_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/detection_post_process_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/detection_post_process_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/dropout_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_grad_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/dropout_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/embedding_lookup_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/embedding_lookup_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/expand_dims_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/expand_dims_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/fft_imag_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_imag_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/fft_real_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_real_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/fill_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fill_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/flatten_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_grad_infer.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/flatten_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/full_connection_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/full_connection_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/fused_batchnorm_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fused_batchnorm_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/gather_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/gather_nd_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_nd_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/group_conv2d_grad_input_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/gru_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gru_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/hashtable_lookup_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/hashtable_lookup_infer.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/infer_register.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer_register.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/invert_permutation_infer.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/invert_permutation_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/layer_norm_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_grad_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/layer_norm_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/lin_space_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lin_space_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/log_softmax_infer.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/log_softmax_infer.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/lsh_projection_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lsh_projection_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/lstm_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lstm_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/matmul_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/matmul_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/max_min_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/mean_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mean_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/merge_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/merge_infer.h 
rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/mfcc_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mfcc_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/non_max_suppression_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/non_max_suppression_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/one_hot_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/one_hot_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/pad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pad_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/partial_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/partial_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/pooling_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_grad_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/pooling_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/power_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/power_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/prior_box_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/quant_dtype_cast_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/quant_dtype_cast_infer.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/random_standard_normal_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/random_standard_normal_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/range_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/range_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/rank_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rank_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/reduce_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reduce_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/reshape_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/resize_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/resize_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/rfft_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rfft_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/roi_pooling_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/roi_pooling_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/scatter_nd_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/scatter_nd_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/select_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/select_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/sgd_infer.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sgd_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/shape_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/shape_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/size_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/size_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/skip_gram_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/skip_gram_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/slice_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/softmax_cross_entropy_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_cross_entropy_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/softmax_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/space_to_batch_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/space_to_batch_nd_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_nd_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/space_to_depth_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_depth_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/sparse_to_dense_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_to_dense_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/splice_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/splice_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/split_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/squeeze_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/stack_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/stack_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/strided_slice_grad_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_grad_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/strided_slice_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/switch_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/switch_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/tensorlist_fromtensor_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/tensorlist_getitem_infer.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_getitem_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/tensorlist_reserve_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_reserve_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/tensorlist_setitem_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/tensorlist_stack_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_stack_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/tile_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/topk_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/topk_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/transpose_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/transpose_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/uniform_real_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/uniform_real_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/unique_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unique_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/unsorted_segment_sum_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsorted_segment_sum_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/unsqueeze_infer.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsqueeze_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/unstack_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unstack_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/where_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/where_infer.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/infer/while_infer.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/while_infer.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/instance_norm_parameter.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/add_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/add_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/arg_min_max_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arg_min_max_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/arithmetic_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/arithmetic_self_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_self_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/batch_to_space_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batch_to_space_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/batchnorm_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batchnorm_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/common_func_int8.c (100%) create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/common_func_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/concat_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/concat_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/conv1x1_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv1x1_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/conv3x3_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv3x3_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/conv_depthwise_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_depthwise_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/conv_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/crop_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/crop_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/deconv_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/deconv_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/depth_to_space_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/depth_to_space_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/div_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/div_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/fixed_point.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/fixed_point.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/gatherNd_int8.c 
(100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/gatherNd_int8.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/gather_int8.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/gather_int8.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/hswish_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/hswish_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/l2_norm_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/l2_norm_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/layer_norm_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/layer_norm_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/leaky_relu_int8.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/leaky_relu_int8.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/matmul_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/matmul_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/mul_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/mul_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/pack_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pack_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/pad_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pad_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/pooling_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pooling_int8.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/power_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/power_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/quant_dtype_cast_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quant_dtype_cast_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/quantize.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quantize.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/reduce_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reduce_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/relux_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/relux_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/reshape_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reshape_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/resize_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/resize_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/scale_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/scale_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/sigmoid_int8.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/sigmoid_int8.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/slice_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/slice_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/softmax_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/softmax_int8.h rename 
mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/space_to_batch_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/space_to_batch_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/splice_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/splice_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/split_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/split_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/squeeze_int8.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/squeeze_int8.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/sub_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sub_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/tanh_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/tanh_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/topk_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/topk_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/transpose_int8.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/transpose_int8.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/unsqueeze_int8.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/int8/unsqueeze_int8.h (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/avx/common_utils.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/avx/common_utils.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/ms_simd_instructions.h rename mindspore/{lite => 
ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/sse/ConvDwFp32IndirectRow.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/sse/ConvDwFp32Row_sse.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/sse/DepthwiseFp32_Sse.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/sse/MatMul_Sse.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/sse/PostFuncBiasReluC4.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/sse/PostFuncBiasReluC8.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/sse/TiledC4MatMulFp32.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/sse/WinogradTrans.c (100%) rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/intrinsics/sse/sse_common.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/sse_common.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/l2_norm_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/layer_norm_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/lsh_projection_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/lstm_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/matmul_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/mul_parameter.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/nnacl_common.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_common.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/nnacl_utils.c (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_utils.h create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/non_max_suppression_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/op_base.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/optimize/CMakeLists.txt (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pack.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pad_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pooling_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/power_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/predict_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/prelu_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/prior_box_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/random_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reduce_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reshape_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/resize_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reverse_sequence_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/scale.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/sigmoid_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/skip_gram_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/slice_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/softmax_parameter.h rename mindspore/{lite => ccsrc/backend/kernel_compiler/cpu}/nnacl/space_to_depth_parameter.h (100%) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/sparse_to_dense_parameter.h create mode 100644 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/splice_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/split_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/squeeze_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/stack_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/strided_slice_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/tensor_c.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/tensorlist_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/transpose.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/unsqueeze_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/unstack_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/upsample_parameter.h create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/where_parameter.h delete mode 100644 mindspore/lite/nnacl/adder.h delete mode 100644 mindspore/lite/nnacl/arg_min_max_parameter.h delete mode 100644 mindspore/lite/nnacl/arithmetic.h delete mode 100644 mindspore/lite/nnacl/arithmetic_self_parameter.h delete mode 100644 mindspore/lite/nnacl/assembly_global.h delete mode 100644 mindspore/lite/nnacl/base/arithmetic_base.h delete mode 100644 mindspore/lite/nnacl/base/batch_to_space_base.h delete mode 100644 mindspore/lite/nnacl/base/cast_base.h delete mode 100644 mindspore/lite/nnacl/base/concat_base.h delete mode 100644 mindspore/lite/nnacl/base/conv1x1_base.h delete mode 100644 mindspore/lite/nnacl/base/depth_to_space_base.h delete mode 100644 mindspore/lite/nnacl/base/fill_base.h delete mode 100644 mindspore/lite/nnacl/base/gather_base.h delete mode 100644 mindspore/lite/nnacl/base/minimal_filtering_generator.h delete mode 100644 mindspore/lite/nnacl/base/slice_base.h delete mode 100644 
mindspore/lite/nnacl/base/split_base.h delete mode 100644 mindspore/lite/nnacl/base/stack_base.h delete mode 100644 mindspore/lite/nnacl/base/tile_base.h delete mode 100644 mindspore/lite/nnacl/base/unstack_base.h delete mode 100644 mindspore/lite/nnacl/base/zeroslike_base.h delete mode 100644 mindspore/lite/nnacl/batch_to_space.h delete mode 100644 mindspore/lite/nnacl/batchnorm_parameter.h delete mode 100644 mindspore/lite/nnacl/broadcast_to_parameter.h delete mode 100644 mindspore/lite/nnacl/cast_parameter.h delete mode 100644 mindspore/lite/nnacl/common_func.h delete mode 100644 mindspore/lite/nnacl/concat_parameter.h delete mode 100644 mindspore/lite/nnacl/constant_of_shape_parameter.h delete mode 100644 mindspore/lite/nnacl/conv_parameter.h delete mode 100644 mindspore/lite/nnacl/crop_parameter.h delete mode 100644 mindspore/lite/nnacl/depth_to_space_parameter.h delete mode 100644 mindspore/lite/nnacl/detection_post_process_parameter.h delete mode 100644 mindspore/lite/nnacl/errorcode.h delete mode 100644 mindspore/lite/nnacl/fill_parameter.h delete mode 100644 mindspore/lite/nnacl/fp16/activation_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/arg_min_max_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/arithmetic_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/arithmetic_self_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/cast_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/common_func_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/constant_of_shape_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/conv_depthwise_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/conv_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/crop_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/deconv_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/deconv_winograd_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/exp_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/gru_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/instance_norm_fp16.h 
delete mode 100644 mindspore/lite/nnacl/fp16/lstm_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/matmul_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/matrix_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/pack_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/pad_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/pooling_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/power_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/reduce_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/scale_fp16.c delete mode 100644 mindspore/lite/nnacl/fp16/scale_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/softmax_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/transpose_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/winograd_transform_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16/winograd_utils_fp16.h delete mode 100644 mindspore/lite/nnacl/fp16_grad/activation_grad.h delete mode 100644 mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.h delete mode 100644 mindspore/lite/nnacl/fp32/activation_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/add_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/adder_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/arg_min_max_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/arithmetic_compare_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/arithmetic_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/arithmetic_self_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/batchnorm_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/broadcast_to_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/common_func_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/constant_of_shape_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/conv_common_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/conv_depthwise_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/conv_winograd_fp32.h delete mode 100644 
mindspore/lite/nnacl/fp32/crop_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/deconv_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/deconv_winograd_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/detection_post_process_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/div_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/elu_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/embedding_lookup_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/exp_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/gatherNd_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/gru_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/instance_norm_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/invert_permutation_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/l2_norm_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/layer_norm_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/local_response_norm_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/lstm_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/matmul_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/mul_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/one_hot_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/pack_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/pad_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/pooling_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/power_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/prelu_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/prior_box_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/range_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/rank_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/reduce_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/resize_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/reverse_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/reverse_sequence_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/roi_pooling_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/scale_fp32.h delete 
mode 100644 mindspore/lite/nnacl/fp32/scatter_nd_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/softmax_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/splice_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/squared_difference.c delete mode 100644 mindspore/lite/nnacl/fp32/squared_difference.h delete mode 100644 mindspore/lite/nnacl/fp32/strided_slice_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/sub_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/topk_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/transpose_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/unique_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/where_fp32.h delete mode 100644 mindspore/lite/nnacl/fp32/winograd_transform.h delete mode 100644 mindspore/lite/nnacl/fp32/winograd_utils.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/activation_grad.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/arithmetic_grad.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/batch_norm.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/binary_cross_entropy.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/binary_cross_entropy_grad.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/convolution_grad_filter.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/dropout_grad.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/dropout_parameter.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/gemm.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/layernorm_grad.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/layernormgrad_parameter.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/optimizer.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/pack_ext.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/pooling_grad.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/reduce_grad.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/resize_grad.h delete mode 100644 
mindspore/lite/nnacl/fp32_grad/smooth_l1_loss.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/softmax_grad.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/strided_slice_grad.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/unsorted_segment_sum.h delete mode 100644 mindspore/lite/nnacl/fp32_grad/utils.h delete mode 100644 mindspore/lite/nnacl/gelu_parameter.h delete mode 100644 mindspore/lite/nnacl/gru_parameter.h delete mode 100644 mindspore/lite/nnacl/infer/adam_infer.h delete mode 100644 mindspore/lite/nnacl/infer/add_sub_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/addn_infer.h delete mode 100644 mindspore/lite/nnacl/infer/apply_momentum_infer.h delete mode 100644 mindspore/lite/nnacl/infer/argmin_max_infer.h delete mode 100644 mindspore/lite/nnacl/infer/arithmetic_compare_infer.h delete mode 100644 mindspore/lite/nnacl/infer/arithmetic_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/arithmetic_infer.h delete mode 100644 mindspore/lite/nnacl/infer/assert_op_infer.h delete mode 100644 mindspore/lite/nnacl/infer/assign_add_infer.h delete mode 100644 mindspore/lite/nnacl/infer/assign_infer.h delete mode 100644 mindspore/lite/nnacl/infer/audio_spectrogram_infer.h delete mode 100644 mindspore/lite/nnacl/infer/batch_to_space_infer.h delete mode 100644 mindspore/lite/nnacl/infer/bias_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h delete mode 100644 mindspore/lite/nnacl/infer/bn_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/broadcast_to_infer.h delete mode 100644 mindspore/lite/nnacl/infer/cast_infer.h delete mode 100644 mindspore/lite/nnacl/infer/common_infer.h delete mode 100644 mindspore/lite/nnacl/infer/concat_infer.h delete mode 100644 mindspore/lite/nnacl/infer/constant_of_shape_infer.h delete mode 100644 mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h delete mode 100644 mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h delete mode 100644 
mindspore/lite/nnacl/infer/conv2d_infer.h delete mode 100644 mindspore/lite/nnacl/infer/crop_and_resize_infer.h delete mode 100644 mindspore/lite/nnacl/infer/crop_infer.h delete mode 100644 mindspore/lite/nnacl/infer/custom_extract_features_infer.h delete mode 100644 mindspore/lite/nnacl/infer/custom_normalize_infer.h delete mode 100644 mindspore/lite/nnacl/infer/custom_predict_infer.h delete mode 100644 mindspore/lite/nnacl/infer/deconv2d_infer.h delete mode 100644 mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h delete mode 100644 mindspore/lite/nnacl/infer/depth_to_space_infer.h delete mode 100644 mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h delete mode 100644 mindspore/lite/nnacl/infer/detection_post_process_infer.h delete mode 100644 mindspore/lite/nnacl/infer/dropout_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/dropout_infer.h delete mode 100644 mindspore/lite/nnacl/infer/embedding_lookup_infer.h delete mode 100644 mindspore/lite/nnacl/infer/expand_dims_infer.h delete mode 100644 mindspore/lite/nnacl/infer/fft_imag_infer.h delete mode 100644 mindspore/lite/nnacl/infer/fft_real_infer.h delete mode 100644 mindspore/lite/nnacl/infer/fill_infer.h delete mode 100644 mindspore/lite/nnacl/infer/flatten_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/flatten_infer.h delete mode 100644 mindspore/lite/nnacl/infer/full_connection_infer.h delete mode 100644 mindspore/lite/nnacl/infer/fused_batchnorm_infer.h delete mode 100644 mindspore/lite/nnacl/infer/gather_infer.h delete mode 100644 mindspore/lite/nnacl/infer/gather_nd_infer.h delete mode 100644 mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h delete mode 100644 mindspore/lite/nnacl/infer/gru_infer.h delete mode 100644 mindspore/lite/nnacl/infer/hashtable_lookup_infer.h delete mode 100644 mindspore/lite/nnacl/infer/infer.h delete mode 100644 mindspore/lite/nnacl/infer/infer_register.h delete mode 100644 mindspore/lite/nnacl/infer/invert_permutation_infer.h delete mode 
100644 mindspore/lite/nnacl/infer/layer_norm_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/layer_norm_infer.h delete mode 100644 mindspore/lite/nnacl/infer/lin_space_infer.h delete mode 100644 mindspore/lite/nnacl/infer/lsh_projection_infer.h delete mode 100644 mindspore/lite/nnacl/infer/lstm_infer.h delete mode 100644 mindspore/lite/nnacl/infer/matmul_infer.h delete mode 100644 mindspore/lite/nnacl/infer/max_min_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/mean_infer.h delete mode 100644 mindspore/lite/nnacl/infer/merge_infer.h delete mode 100644 mindspore/lite/nnacl/infer/mfcc_infer.h delete mode 100644 mindspore/lite/nnacl/infer/non_max_suppression_infer.h delete mode 100644 mindspore/lite/nnacl/infer/one_hot_infer.h delete mode 100644 mindspore/lite/nnacl/infer/pad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/partial_infer.h delete mode 100644 mindspore/lite/nnacl/infer/pooling_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/pooling_infer.h delete mode 100644 mindspore/lite/nnacl/infer/power_infer.h delete mode 100644 mindspore/lite/nnacl/infer/prior_box_infer.h delete mode 100644 mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h delete mode 100644 mindspore/lite/nnacl/infer/random_standard_normal_infer.h delete mode 100644 mindspore/lite/nnacl/infer/range_infer.h delete mode 100644 mindspore/lite/nnacl/infer/rank_infer.h delete mode 100644 mindspore/lite/nnacl/infer/reduce_infer.h delete mode 100644 mindspore/lite/nnacl/infer/reshape_infer.h delete mode 100644 mindspore/lite/nnacl/infer/resize_infer.h delete mode 100644 mindspore/lite/nnacl/infer/rfft_infer.h delete mode 100644 mindspore/lite/nnacl/infer/roi_pooling_infer.h delete mode 100644 mindspore/lite/nnacl/infer/scatter_nd_infer.h delete mode 100644 mindspore/lite/nnacl/infer/select_infer.h delete mode 100644 mindspore/lite/nnacl/infer/sgd_infer.h delete mode 100644 mindspore/lite/nnacl/infer/shape_infer.h delete mode 100644 
mindspore/lite/nnacl/infer/size_infer.h delete mode 100644 mindspore/lite/nnacl/infer/skip_gram_infer.h delete mode 100644 mindspore/lite/nnacl/infer/slice_infer.h delete mode 100644 mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h delete mode 100644 mindspore/lite/nnacl/infer/softmax_infer.h delete mode 100644 mindspore/lite/nnacl/infer/space_to_batch_infer.h delete mode 100644 mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h delete mode 100644 mindspore/lite/nnacl/infer/space_to_depth_infer.h delete mode 100644 mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h delete mode 100644 mindspore/lite/nnacl/infer/sparse_to_dense_infer.h delete mode 100644 mindspore/lite/nnacl/infer/splice_infer.h delete mode 100644 mindspore/lite/nnacl/infer/split_infer.h delete mode 100644 mindspore/lite/nnacl/infer/squeeze_infer.h delete mode 100644 mindspore/lite/nnacl/infer/stack_infer.h delete mode 100644 mindspore/lite/nnacl/infer/strided_slice_grad_infer.h delete mode 100644 mindspore/lite/nnacl/infer/strided_slice_infer.h delete mode 100644 mindspore/lite/nnacl/infer/switch_infer.h delete mode 100644 mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h delete mode 100644 mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h delete mode 100644 mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h delete mode 100644 mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h delete mode 100644 mindspore/lite/nnacl/infer/tensorlist_stack_infer.h delete mode 100644 mindspore/lite/nnacl/infer/tile_infer.h delete mode 100644 mindspore/lite/nnacl/infer/topk_infer.h delete mode 100644 mindspore/lite/nnacl/infer/transpose_infer.h delete mode 100644 mindspore/lite/nnacl/infer/uniform_real_infer.h delete mode 100644 mindspore/lite/nnacl/infer/unique_infer.h delete mode 100644 mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h delete mode 100644 mindspore/lite/nnacl/infer/unsqueeze_infer.h delete mode 100644 mindspore/lite/nnacl/infer/unstack_infer.h 
delete mode 100644 mindspore/lite/nnacl/infer/where_infer.h delete mode 100644 mindspore/lite/nnacl/infer/while_infer.h delete mode 100644 mindspore/lite/nnacl/instance_norm_parameter.h delete mode 100644 mindspore/lite/nnacl/int8/add_int8.h delete mode 100644 mindspore/lite/nnacl/int8/arg_min_max_int8.h delete mode 100644 mindspore/lite/nnacl/int8/arithmetic_int8.h delete mode 100644 mindspore/lite/nnacl/int8/arithmetic_self_int8.h delete mode 100644 mindspore/lite/nnacl/int8/batch_to_space_int8.h delete mode 100644 mindspore/lite/nnacl/int8/batchnorm_int8.h delete mode 100644 mindspore/lite/nnacl/int8/common_func_int8.h delete mode 100644 mindspore/lite/nnacl/int8/concat_int8.h delete mode 100644 mindspore/lite/nnacl/int8/conv1x1_int8.h delete mode 100644 mindspore/lite/nnacl/int8/conv3x3_int8.h delete mode 100644 mindspore/lite/nnacl/int8/conv_depthwise_int8.h delete mode 100644 mindspore/lite/nnacl/int8/conv_int8.h delete mode 100644 mindspore/lite/nnacl/int8/crop_int8.h delete mode 100644 mindspore/lite/nnacl/int8/deconv_int8.h delete mode 100644 mindspore/lite/nnacl/int8/depth_to_space_int8.h delete mode 100644 mindspore/lite/nnacl/int8/div_int8.h delete mode 100644 mindspore/lite/nnacl/int8/fixed_point.h delete mode 100644 mindspore/lite/nnacl/int8/hswish_int8.h delete mode 100644 mindspore/lite/nnacl/int8/l2_norm_int8.h delete mode 100644 mindspore/lite/nnacl/int8/layer_norm_int8.h delete mode 100644 mindspore/lite/nnacl/int8/matmul_int8.h delete mode 100644 mindspore/lite/nnacl/int8/mul_int8.h delete mode 100644 mindspore/lite/nnacl/int8/pack_int8.h delete mode 100644 mindspore/lite/nnacl/int8/pad_int8.h delete mode 100644 mindspore/lite/nnacl/int8/pooling_int8.h delete mode 100644 mindspore/lite/nnacl/int8/power_int8.h delete mode 100644 mindspore/lite/nnacl/int8/quant_dtype_cast_int8.h delete mode 100644 mindspore/lite/nnacl/int8/quantize.h delete mode 100644 mindspore/lite/nnacl/int8/reduce_int8.h delete mode 100644 
mindspore/lite/nnacl/int8/relux_int8.h delete mode 100644 mindspore/lite/nnacl/int8/reshape_int8.h delete mode 100644 mindspore/lite/nnacl/int8/resize_int8.h delete mode 100644 mindspore/lite/nnacl/int8/scale_int8.h delete mode 100644 mindspore/lite/nnacl/int8/slice_int8.h delete mode 100644 mindspore/lite/nnacl/int8/softmax_int8.h delete mode 100644 mindspore/lite/nnacl/int8/space_to_batch_int8.h delete mode 100644 mindspore/lite/nnacl/int8/splice_int8.h delete mode 100644 mindspore/lite/nnacl/int8/split_int8.h delete mode 100644 mindspore/lite/nnacl/int8/sub_int8.h delete mode 100644 mindspore/lite/nnacl/int8/tanh_int8.h delete mode 100644 mindspore/lite/nnacl/int8/topk_int8.h delete mode 100644 mindspore/lite/nnacl/int8/transpose_int8.h delete mode 100644 mindspore/lite/nnacl/intrinsics/avx/common_utils.h delete mode 100644 mindspore/lite/nnacl/intrinsics/ms_simd_instructions.h delete mode 100644 mindspore/lite/nnacl/intrinsics/sse/sse_common.h delete mode 100644 mindspore/lite/nnacl/l2_norm_parameter.h delete mode 100644 mindspore/lite/nnacl/layer_norm_parameter.h delete mode 100644 mindspore/lite/nnacl/lsh_projection_parameter.h delete mode 100644 mindspore/lite/nnacl/lstm_parameter.h delete mode 100644 mindspore/lite/nnacl/matmul_parameter.h delete mode 100644 mindspore/lite/nnacl/mul_parameter.h delete mode 100644 mindspore/lite/nnacl/nnacl_common.h delete mode 100644 mindspore/lite/nnacl/nnacl_utils.h delete mode 100644 mindspore/lite/nnacl/non_max_suppression_parameter.h delete mode 100644 mindspore/lite/nnacl/op_base.h delete mode 100644 mindspore/lite/nnacl/pack.h delete mode 100644 mindspore/lite/nnacl/pad_parameter.h delete mode 100644 mindspore/lite/nnacl/pooling_parameter.h delete mode 100644 mindspore/lite/nnacl/power_parameter.h delete mode 100644 mindspore/lite/nnacl/predict_parameter.h delete mode 100644 mindspore/lite/nnacl/prelu_parameter.h delete mode 100644 mindspore/lite/nnacl/prior_box_parameter.h delete mode 100644 
mindspore/lite/nnacl/random_parameter.h delete mode 100644 mindspore/lite/nnacl/reduce_parameter.h delete mode 100644 mindspore/lite/nnacl/reshape_parameter.h delete mode 100644 mindspore/lite/nnacl/resize_parameter.h delete mode 100644 mindspore/lite/nnacl/reverse_sequence_parameter.h delete mode 100644 mindspore/lite/nnacl/scale.h delete mode 100644 mindspore/lite/nnacl/sigmoid_parameter.h delete mode 100644 mindspore/lite/nnacl/skip_gram_parameter.h delete mode 100644 mindspore/lite/nnacl/slice_parameter.h delete mode 100644 mindspore/lite/nnacl/softmax_parameter.h delete mode 100644 mindspore/lite/nnacl/sparse_to_dense_parameter.h delete mode 100644 mindspore/lite/nnacl/splice_parameter.h delete mode 100644 mindspore/lite/nnacl/split_parameter.h delete mode 100644 mindspore/lite/nnacl/squeeze_parameter.h delete mode 100644 mindspore/lite/nnacl/stack_parameter.h delete mode 100644 mindspore/lite/nnacl/strided_slice_parameter.h delete mode 100644 mindspore/lite/nnacl/tensor_c.h delete mode 100644 mindspore/lite/nnacl/tensorlist_parameter.h delete mode 100644 mindspore/lite/nnacl/transpose.h delete mode 100644 mindspore/lite/nnacl/unsqueeze_parameter.h delete mode 100644 mindspore/lite/nnacl/unstack_parameter.h delete mode 100644 mindspore/lite/nnacl/upsample_parameter.h delete mode 100644 mindspore/lite/nnacl/where_parameter.h diff --git a/cmake/package_lite.cmake b/cmake/package_lite.cmake index 4110d04cd3..8e8be2db67 100644 --- a/cmake/package_lite.cmake +++ b/cmake/package_lite.cmake @@ -153,15 +153,15 @@ if(PLATFORM_ARM64) COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) - file(GLOB NNACL_FILES GLOB ${TOP_DIR}/mindspore/lite/nnacl/*.h) + file(GLOB NNACL_FILES GLOB ${NNACL_DIR}/*.h) install(FILES ${NNACL_FILES} DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME}) - 
install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/base DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/base DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/int8 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/int8 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/fp32 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/fp32 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") install(DIRECTORY ${TOP_DIR}/mindspore/lite/micro/coder/wrapper DESTINATION ${CODEGEN_ROOT_DIR}/include COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") @@ -185,15 +185,15 @@ elseif(PLATFORM_ARM32) COMPONENT ${RUNTIME_COMPONENT_NAME}) install(DIRECTORY ${TOP_DIR}/include/api/ DESTINATION ${RUNTIME_INC_DIR}/api COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE) - file(GLOB NNACL_FILES GLOB ${TOP_DIR}/mindspore/lite/nnacl/*.h) + file(GLOB NNACL_FILES GLOB ${NNACL_DIR}/*.h) install(FILES ${NNACL_FILES} DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME}) - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/base DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/base DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/int8 DESTINATION 
${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/int8 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/fp32 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/fp32 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") install(DIRECTORY ${TOP_DIR}/mindspore/lite/micro/coder/wrapper DESTINATION ${CODEGEN_ROOT_DIR}/include COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") @@ -262,15 +262,15 @@ else() install(FILES ${glog_LIBPATH}/libglog.so.0.4.0 DESTINATION ${CONVERTER_ROOT_DIR}/third_party/glog/lib RENAME libglog.so.0 COMPONENT ${RUNTIME_COMPONENT_NAME}) - file(GLOB NNACL_FILES GLOB ${TOP_DIR}/mindspore/lite/nnacl/*.h) + file(GLOB NNACL_FILES GLOB ${NNACL_DIR}/*.h) install(FILES ${NNACL_FILES} DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME}) - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/base DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/base DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/int8 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/int8 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/fp32 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/fp32 DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT 
${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") - install(DIRECTORY ${TOP_DIR}/mindspore/lite/nnacl/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl + install(DIRECTORY ${NNACL_DIR}/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") install(DIRECTORY ${TOP_DIR}/mindspore/lite/micro/coder/wrapper DESTINATION ${CODEGEN_ROOT_DIR}/include COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h") diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 1c94397526..b9ac4f6e32 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -4,6 +4,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}) include_directories(${CMAKE_BINARY_DIR}) if(ENABLE_CPU) + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/backend/kernel_compiler/cpu) if("${X86_64_SIMD}" STREQUAL "sse") add_compile_definitions(ENABLE_SSE) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2") @@ -12,8 +13,8 @@ if(ENABLE_CPU) if("${X86_64_SIMD}" STREQUAL "avx") add_compile_definitions(ENABLE_SSE) add_compile_definitions(ENABLE_AVX) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.1 -mavx -mavx2") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1 -mavx -mavx2") endif() endif() diff --git a/mindspore/ccsrc/backend/kernel_compiler/CMakeLists.txt b/mindspore/ccsrc/backend/kernel_compiler/CMakeLists.txt index 2caf568e87..f0e7c3f8ef 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/CMakeLists.txt +++ b/mindspore/ccsrc/backend/kernel_compiler/CMakeLists.txt @@ -29,8 +29,35 @@ if(ENABLE_D) endif() if(ENABLE_CPU) - file(GLOB_RECURSE CPU_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") + file(GLOB NNACL_C_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} + "cpu/nnacl/*.c" + "cpu/nnacl/fp32/*.c" + "cpu/nnacl/int8/*.c" + "cpu/nnacl/infer/*.c" + 
"cpu/nnacl/base/*.c" + ) + if("${X86_64_SIMD}" STREQUAL "sse") + file(GLOB NNACL_ASM_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} + "cpu/nnacl/intrinsics/sse/*.c" + ) + set_property(SOURCE ${NNACL_ASM_SRC} PROPERTY LANGUAGE C) + endif() + if("${X86_64_SIMD}" STREQUAL "avx") + file(GLOB NNACL_ASM_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} + "cpu/nnacl/intrinsics/sse/*.c" + "cpu/nnacl/intrinsics/avx/*.c" + "cpu/nnacl/assembly/avx/*.S" + ) + set_property(SOURCE ${NNACL_ASM_SRC} PROPERTY LANGUAGE C) + endif() + set(NNACL_SRC ${NNACL_C_SRC} ${NNACL_ASM_SRC}) + + file(GLOB CPU_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "cpu/*.cc" + "cpu/mkldnn/*.cc" + "cpu/ps/*.cc" + "cpu/quantum/*.cc" ) if(NOT ENABLE_MPI) @@ -102,7 +129,7 @@ if(ENABLE_GPU) # add_library(_mindspore_kernel_cuda_obj OBJECT ${CUDA_SRC_LIST}) endif() -set_property(SOURCE ${KERNEL_SRC_LIST} ${CPU_SRC_LIST} ${GPU_SRC_LIST} ${D_SRC_LIST} +set_property(SOURCE ${KERNEL_SRC_LIST} ${NNACL_SRC} ${CPU_SRC_LIST} ${GPU_SRC_LIST} ${D_SRC_LIST} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_KERNEL) -add_library(_mindspore_backend_kernel_compiler_obj OBJECT ${KERNEL_SRC_LIST} ${CPU_SRC_LIST} +add_library(_mindspore_backend_kernel_compiler_obj OBJECT ${KERNEL_SRC_LIST} ${NNACL_SRC} ${CPU_SRC_LIST} ${GPU_SRC_LIST} ${D_SRC_LIST} ${QUANTUM_SRC_LIST}) diff --git a/mindspore/lite/nnacl/CMakeLists.txt b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/CMakeLists.txt similarity index 100% rename from mindspore/lite/nnacl/CMakeLists.txt rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/CMakeLists.txt diff --git a/mindspore/lite/nnacl/README.md b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/README.md similarity index 100% rename from mindspore/lite/nnacl/README.md rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/README.md diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/adder.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/adder.h new file mode 100644 index 
0000000000..b62ab145b5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/adder.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_ADDER_H_ +#define MINDSPORE_NNACL_ADDER_H_ + +#include "nnacl/op_base.h" + +typedef struct AdderParameter { + OpParameter op_parameter_; +} AdderParameter; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ADDER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arg_min_max_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arg_min_max_parameter.h new file mode 100644 index 0000000000..f6cf61471c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arg_min_max_parameter.h @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_ARG_MIN_MAX_PARAMETER_H_ +#define MINDSPORE_NNACL_ARG_MIN_MAX_PARAMETER_H_ + +#ifdef ENABLE_ARM64 +#include +#endif +#include "nnacl/op_base.h" + +typedef int (*COMPARE_FUNCTION)(const void *a, const void *b); + +typedef struct ArgElement { + uint32_t index_; + union ArgData { + int8_t i8_data_; + int32_t i_data_; + float f_data_; +#ifdef ENABLE_ARM64 + float16_t f16_data_; +#endif + } data_; +} ArgElement; + +typedef struct ArgMinMaxParameter { + OpParameter op_parameter_; + bool out_value_; + bool keep_dims_; + bool get_max_; + int32_t axis_; + int32_t topk_; + int32_t axis_type_; + int32_t dims_size_; + int32_t data_type_; // equals to type_id + int32_t in_strides_[COMM_SHAPE_SIZE]; + int32_t out_strides_[COMM_SHAPE_SIZE]; + ArgElement *arg_elements_; +} ArgMinMaxParameter; + +#endif // MINDSPORE_NNACL_ARG_MIN_MAX_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arithmetic.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arithmetic.h new file mode 100644 index 0000000000..59fc552de4 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arithmetic.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_ARTITHMETIC_H_ +#define MINDSPORE_NNACL_ARTITHMETIC_H_ + +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/nnacl_utils.h" + +typedef struct ArithmeticParameter { + OpParameter op_parameter_; + bool broadcasting_; + size_t ndim_; + int activation_type_; + int in_shape0_[10]; + int in_elements_num0_; + int in_shape1_[10]; + int in_elements_num1_; + + int out_shape_[10]; + int out_elements_num_; + + int in_strides0_[10]; + int in_strides1_[10]; + int out_strides_[10]; + + int multiples0_[10]; + int multiples1_[10]; + int eltwise_mode_; // eltwise need +} ArithmeticParameter; + +#endif // MINDSPORE_NNACL_ARTITHMETIC_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arithmetic_self_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arithmetic_self_parameter.h new file mode 100644 index 0000000000..a21e305ff5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/arithmetic_self_parameter.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_ARITHMETIC_SELF_PARAMETER_H_ +#define MINDSPORE_NNACL_ARITHMETIC_SELF_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/quantize.h" + +// For Abs, Cos, Exp, Log, Square, Sqrt, Rsqrt ops. 
+typedef struct ArithmeticSelfParameter { + OpParameter op_parameter_; + ArithSelfQuantArg quant_arg_; +} ArithmeticSelfParameter; + +#endif // MINDSPORE_NNACL_ARITHMETIC_SELF_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/assembly/arm32/ConvDw3x3Int8BorderPixel.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDw3x3Int8BorderPixel.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/ConvDw3x3Int8BorderPixel.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDw3x3Int8BorderPixel.S diff --git a/mindspore/lite/nnacl/assembly/arm32/ConvDwFp32Border.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwFp32Border.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/ConvDwFp32Border.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwFp32Border.S diff --git a/mindspore/lite/nnacl/assembly/arm32/ConvDwFp32Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwFp32Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/ConvDwFp32Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwFp32Center.S diff --git a/mindspore/lite/nnacl/assembly/arm32/ConvDwFp32Row.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwFp32Row.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/ConvDwFp32Row.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwFp32Row.S diff --git a/mindspore/lite/nnacl/assembly/arm32/ConvDwInt8Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwInt8Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/ConvDwInt8Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwInt8Center.S diff --git a/mindspore/lite/nnacl/assembly/arm32/ConvDwInt8PostAlign4.S 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwInt8PostAlign4.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/ConvDwInt8PostAlign4.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwInt8PostAlign4.S diff --git a/mindspore/lite/nnacl/assembly/arm32/ConvDwInt8PostAlign4PerChannel.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwInt8PostAlign4PerChannel.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/ConvDwInt8PostAlign4PerChannel.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwInt8PostAlign4PerChannel.S diff --git a/mindspore/lite/nnacl/assembly/arm32/ConvDwInt8Row.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwInt8Row.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/ConvDwInt8Row.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/ConvDwInt8Row.S diff --git a/mindspore/lite/nnacl/assembly/arm32/DeconvDwFp32Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/DeconvDwFp32Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/DeconvDwFp32Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/DeconvDwFp32Center.S diff --git a/mindspore/lite/nnacl/assembly/arm32/DeconvDwInt8Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/DeconvDwInt8Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/DeconvDwInt8Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/DeconvDwInt8Center.S diff --git a/mindspore/lite/nnacl/assembly/arm32/DeconvDwInt8Post.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/DeconvDwInt8Post.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/DeconvDwInt8Post.S rename to 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/DeconvDwInt8Post.S diff --git a/mindspore/lite/nnacl/assembly/arm32/IndirectGemmInt16to32_8x4.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/IndirectGemmInt16to32_8x4.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/IndirectGemmInt16to32_8x4.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/IndirectGemmInt16to32_8x4.S diff --git a/mindspore/lite/nnacl/assembly/arm32/IndirectGemmInt8_2x4.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/IndirectGemmInt8_2x4.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/IndirectGemmInt8_2x4.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/IndirectGemmInt8_2x4.S diff --git a/mindspore/lite/nnacl/assembly/arm32/MatVecMulFp32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatVecMulFp32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/MatVecMulFp32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatVecMulFp32.S diff --git a/mindspore/lite/nnacl/assembly/arm32/MatmulFp32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulFp32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/MatmulFp32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulFp32.S diff --git a/mindspore/lite/nnacl/assembly/arm32/MatmulFp32Opt.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulFp32Opt.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/MatmulFp32Opt.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulFp32Opt.S diff --git a/mindspore/lite/nnacl/assembly/arm32/MatmulFp32Opt12x4.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulFp32Opt12x4.S similarity index 100% rename from 
mindspore/lite/nnacl/assembly/arm32/MatmulFp32Opt12x4.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulFp32Opt12x4.S diff --git a/mindspore/lite/nnacl/assembly/arm32/MatmulInt8.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulInt8.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/MatmulInt8.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulInt8.S diff --git a/mindspore/lite/nnacl/assembly/arm32/MatmulInt8Opt.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulInt8Opt.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/MatmulInt8Opt.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulInt8Opt.S diff --git a/mindspore/lite/nnacl/assembly/arm32/MatmulWinogradFp32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulWinogradFp32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/MatmulWinogradFp32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/MatmulWinogradFp32.S diff --git a/mindspore/lite/nnacl/assembly/arm32/PostFuncBiasReluC4.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/PostFuncBiasReluC4.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/PostFuncBiasReluC4.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/PostFuncBiasReluC4.S diff --git a/mindspore/lite/nnacl/assembly/arm32/PostFuncBiasReluC8.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/PostFuncBiasReluC8.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/PostFuncBiasReluC8.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/PostFuncBiasReluC8.S diff --git a/mindspore/lite/nnacl/assembly/arm32/PreSum4x16Int8Peroc.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/PreSum4x16Int8Peroc.S similarity 
index 100% rename from mindspore/lite/nnacl/assembly/arm32/PreSum4x16Int8Peroc.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/PreSum4x16Int8Peroc.S diff --git a/mindspore/lite/nnacl/assembly/arm32/PreSum4x16Int8Pert.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/PreSum4x16Int8Pert.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/PreSum4x16Int8Pert.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/PreSum4x16Int8Pert.S diff --git a/mindspore/lite/nnacl/assembly/arm32/TiledC4MatmulFp32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/TiledC4MatmulFp32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/TiledC4MatmulFp32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/TiledC4MatmulFp32.S diff --git a/mindspore/lite/nnacl/assembly/arm32/WinogradTransLeft.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/WinogradTransLeft.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/WinogradTransLeft.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/WinogradTransLeft.S diff --git a/mindspore/lite/nnacl/assembly/arm32/WinogradTransRight.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/WinogradTransRight.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm32/WinogradTransRight.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm32/WinogradTransRight.S diff --git a/mindspore/lite/nnacl/assembly/arm64/AdderFp32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/AdderFp32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/AdderFp32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/AdderFp32.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Corner.S 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Corner.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Corner.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Corner.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Horizontal.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Horizontal.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Horizontal.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Horizontal.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Stride1.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Stride1.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Stride1.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Stride1.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Stride2.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Stride2.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Stride2.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Stride2.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Vertical.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Vertical.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Fp32Vertical.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Fp32Vertical.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8.S rename to 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8Corner.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8Corner.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8Corner.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8Corner.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8Horizontal.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8Horizontal.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8Horizontal.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8Horizontal.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8Stride2.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8Stride2.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8Stride2.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8Stride2.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8Vertical.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8Vertical.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDw3x3Int8Vertical.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDw3x3Int8Vertical.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Border.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Border.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Border.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Border.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Center.S 
similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Center.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Indirect3x3.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Indirect3x3.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Indirect3x3.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Indirect3x3.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Indirect5x5.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Indirect5x5.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Indirect5x5.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Indirect5x5.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Row.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Row.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDwFp32Row.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwFp32Row.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDwInt8Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwInt8Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDwInt8Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwInt8Center.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDwInt8PostAlign4.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwInt8PostAlign4.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDwInt8PostAlign4.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwInt8PostAlign4.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDwInt8PostAlign4PerChannel.S 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwInt8PostAlign4PerChannel.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDwInt8PostAlign4PerChannel.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwInt8PostAlign4PerChannel.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvDwInt8Row.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwInt8Row.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvDwInt8Row.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvDwInt8Row.S diff --git a/mindspore/lite/nnacl/assembly/arm64/ConvFp32Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvFp32Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/ConvFp32Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/ConvFp32Center.S diff --git a/mindspore/lite/nnacl/assembly/arm64/DeconvDwFp32Border.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/DeconvDwFp32Border.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/DeconvDwFp32Border.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/DeconvDwFp32Border.S diff --git a/mindspore/lite/nnacl/assembly/arm64/DeconvDwFp32Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/DeconvDwFp32Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/DeconvDwFp32Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/DeconvDwFp32Center.S diff --git a/mindspore/lite/nnacl/assembly/arm64/DeconvDwInt8Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/DeconvDwInt8Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/DeconvDwInt8Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/DeconvDwInt8Center.S 
diff --git a/mindspore/lite/nnacl/assembly/arm64/DeconvDwInt8Post.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/DeconvDwInt8Post.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/DeconvDwInt8Post.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/DeconvDwInt8Post.S diff --git a/mindspore/lite/nnacl/assembly/arm64/IndirectGemmInt16to32_8x4.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/IndirectGemmInt16to32_8x4.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/IndirectGemmInt16to32_8x4.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/IndirectGemmInt16to32_8x4.S diff --git a/mindspore/lite/nnacl/assembly/arm64/MatVecMulFp32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatVecMulFp32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/MatVecMulFp32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatVecMulFp32.S diff --git a/mindspore/lite/nnacl/assembly/arm64/MatmulFp32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulFp32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/MatmulFp32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulFp32.S diff --git a/mindspore/lite/nnacl/assembly/arm64/MatmulFp32Opt.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulFp32Opt.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/MatmulFp32Opt.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulFp32Opt.S diff --git a/mindspore/lite/nnacl/assembly/arm64/MatmulInt8.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulInt8.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/MatmulInt8.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulInt8.S diff --git 
a/mindspore/lite/nnacl/assembly/arm64/MatmulInt8Opt.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulInt8Opt.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/MatmulInt8Opt.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulInt8Opt.S diff --git a/mindspore/lite/nnacl/assembly/arm64/MatmulR4Int8.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulR4Int8.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/MatmulR4Int8.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulR4Int8.S diff --git a/mindspore/lite/nnacl/assembly/arm64/MatmulWinogradFp32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulWinogradFp32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/MatmulWinogradFp32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/MatmulWinogradFp32.S diff --git a/mindspore/lite/nnacl/assembly/arm64/PostFuncBiasReluC4.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PostFuncBiasReluC4.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/PostFuncBiasReluC4.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PostFuncBiasReluC4.S diff --git a/mindspore/lite/nnacl/assembly/arm64/PostFuncBiasReluC8.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PostFuncBiasReluC8.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/PostFuncBiasReluC8.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PostFuncBiasReluC8.S diff --git a/mindspore/lite/nnacl/assembly/arm64/PostFuncInt8C4Neon64.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PostFuncInt8C4Neon64.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/PostFuncInt8C4Neon64.S rename to 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PostFuncInt8C4Neon64.S diff --git a/mindspore/lite/nnacl/assembly/arm64/PreSum4x16Int8Peroc.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PreSum4x16Int8Peroc.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/PreSum4x16Int8Peroc.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PreSum4x16Int8Peroc.S diff --git a/mindspore/lite/nnacl/assembly/arm64/PreSum4x16Int8Pert.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PreSum4x16Int8Pert.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/PreSum4x16Int8Pert.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/PreSum4x16Int8Pert.S diff --git a/mindspore/lite/nnacl/assembly/arm64/TiledC4MatmulFp32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/TiledC4MatmulFp32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/TiledC4MatmulFp32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/TiledC4MatmulFp32.S diff --git a/mindspore/lite/nnacl/assembly/arm64/WinogradTransLeft.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/WinogradTransLeft.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/WinogradTransLeft.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/WinogradTransLeft.S diff --git a/mindspore/lite/nnacl/assembly/arm64/WinogradTransRight.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/WinogradTransRight.S similarity index 100% rename from mindspore/lite/nnacl/assembly/arm64/WinogradTransRight.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/arm64/WinogradTransRight.S diff --git a/mindspore/lite/nnacl/assembly/avx/ConvDwFp32Avx3x3.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/avx/ConvDwFp32Avx3x3.S similarity index 100% rename from 
mindspore/lite/nnacl/assembly/avx/ConvDwFp32Avx3x3.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/avx/ConvDwFp32Avx3x3.S diff --git a/mindspore/lite/nnacl/assembly/avx/ConvDwFp32BorderAvx.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/avx/ConvDwFp32BorderAvx.S similarity index 100% rename from mindspore/lite/nnacl/assembly/avx/ConvDwFp32BorderAvx.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/avx/ConvDwFp32BorderAvx.S diff --git a/mindspore/lite/nnacl/assembly/avx/ConvDwFp32RowAvx.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/avx/ConvDwFp32RowAvx.S similarity index 100% rename from mindspore/lite/nnacl/assembly/avx/ConvDwFp32RowAvx.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/avx/ConvDwFp32RowAvx.S diff --git a/mindspore/lite/nnacl/assembly/avx/MatmulAvx.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/avx/MatmulAvx.S similarity index 100% rename from mindspore/lite/nnacl/assembly/avx/MatmulAvx.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/avx/MatmulAvx.S diff --git a/mindspore/lite/nnacl/assembly/fp16/ConvDwFp16Border.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/ConvDwFp16Border.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/ConvDwFp16Border.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/ConvDwFp16Border.S diff --git a/mindspore/lite/nnacl/assembly/fp16/ConvDwFp16Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/ConvDwFp16Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/ConvDwFp16Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/ConvDwFp16Center.S diff --git a/mindspore/lite/nnacl/assembly/fp16/ConvDwFp16Row.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/ConvDwFp16Row.S similarity index 100% rename from 
mindspore/lite/nnacl/assembly/fp16/ConvDwFp16Row.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/ConvDwFp16Row.S diff --git a/mindspore/lite/nnacl/assembly/fp16/DeconvDwFp16Border.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/DeconvDwFp16Border.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/DeconvDwFp16Border.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/DeconvDwFp16Border.S diff --git a/mindspore/lite/nnacl/assembly/fp16/DeconvDwFp16Center.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/DeconvDwFp16Center.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/DeconvDwFp16Center.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/DeconvDwFp16Center.S diff --git a/mindspore/lite/nnacl/assembly/fp16/Float16ToFloat32.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/Float16ToFloat32.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/Float16ToFloat32.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/Float16ToFloat32.S diff --git a/mindspore/lite/nnacl/assembly/fp16/Float32ToFloat16.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/Float32ToFloat16.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/Float32ToFloat16.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/Float32ToFloat16.S diff --git a/mindspore/lite/nnacl/assembly/fp16/IndirectGemmFp16_16x8.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/IndirectGemmFp16_16x8.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/IndirectGemmFp16_16x8.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/IndirectGemmFp16_16x8.S diff --git a/mindspore/lite/nnacl/assembly/fp16/MatVecMulFp16.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/MatVecMulFp16.S similarity 
index 100% rename from mindspore/lite/nnacl/assembly/fp16/MatVecMulFp16.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/MatVecMulFp16.S diff --git a/mindspore/lite/nnacl/assembly/fp16/MatmulFp16.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/MatmulFp16.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/MatmulFp16.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/MatmulFp16.S diff --git a/mindspore/lite/nnacl/assembly/fp16/MatmulFp16Opt.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/MatmulFp16Opt.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/MatmulFp16Opt.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/MatmulFp16Opt.S diff --git a/mindspore/lite/nnacl/assembly/fp16/MatmulWinogradFp16.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/MatmulWinogradFp16.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/MatmulWinogradFp16.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/MatmulWinogradFp16.S diff --git a/mindspore/lite/nnacl/assembly/fp16/PostFuncBiasReluC4Fp16.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/PostFuncBiasReluC4Fp16.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/PostFuncBiasReluC4Fp16.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/PostFuncBiasReluC4Fp16.S diff --git a/mindspore/lite/nnacl/assembly/fp16/PostFuncBiasReluC8Fp16.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/PostFuncBiasReluC8Fp16.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/PostFuncBiasReluC8Fp16.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/PostFuncBiasReluC8Fp16.S diff --git a/mindspore/lite/nnacl/assembly/fp16/TiledC4MatmulFp16.S 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/TiledC4MatmulFp16.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/TiledC4MatmulFp16.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/TiledC4MatmulFp16.S diff --git a/mindspore/lite/nnacl/assembly/fp16/WinogradTransLeftFp16.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/WinogradTransLeftFp16.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/WinogradTransLeftFp16.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/WinogradTransLeftFp16.S diff --git a/mindspore/lite/nnacl/assembly/fp16/WinogradTransRightFp16.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/WinogradTransRightFp16.S similarity index 100% rename from mindspore/lite/nnacl/assembly/fp16/WinogradTransRightFp16.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/fp16/WinogradTransRightFp16.S diff --git a/mindspore/lite/nnacl/assembly/opt/MatmulDpInt8.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/opt/MatmulDpInt8.S similarity index 100% rename from mindspore/lite/nnacl/assembly/opt/MatmulDpInt8.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/opt/MatmulDpInt8.S diff --git a/mindspore/lite/nnacl/assembly/opt/MatmulDpInt8Opt.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/opt/MatmulDpInt8Opt.S similarity index 100% rename from mindspore/lite/nnacl/assembly/opt/MatmulDpInt8Opt.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/opt/MatmulDpInt8Opt.S diff --git a/mindspore/lite/nnacl/assembly/opt/MatmulOptR4Int8.S b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/opt/MatmulOptR4Int8.S similarity index 100% rename from mindspore/lite/nnacl/assembly/opt/MatmulOptR4Int8.S rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/opt/MatmulOptR4Int8.S diff --git 
a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly_global.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly_global.h new file mode 100644 index 0000000000..2e92e91f34 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly_global.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_ASSEMBLY_GLOBAL_H +#define MINDSPORE_NNACL_ASSEMBLY_GLOBAL_H + +// clang-format off +.macro asm_function fname +#ifdef __APPLE__ +.globl _\fname +_\fname: +#else +.global \fname +#ifdef __ELF__ +.hidden \fname +.type \fname, %function +#endif +\fname: +#endif +.endm + +// clang-format on + +#endif // MINDSPORE_NNACL_ASSEMBLY_GLOBAL_H diff --git a/mindspore/lite/nnacl/base/arithmetic_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/arithmetic_base.c similarity index 100% rename from mindspore/lite/nnacl/base/arithmetic_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/arithmetic_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/arithmetic_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/arithmetic_base.h new file mode 100644 index 0000000000..cc8231d54f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/arithmetic_base.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 
2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_BASE_ARITHMETIC_BASE_H_ +#define MINDSPORE_NNACL_BASE_ARITHMETIC_BASE_H_ + +#include "nnacl/arithmetic.h" +#include "nnacl/nnacl_utils.h" +#include "nnacl/nnacl_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void CalcMultiplesAndStrides(ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_BASE_ARITHMETIC_BASE_H_ diff --git a/mindspore/lite/nnacl/base/batch_to_space_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/batch_to_space_base.c similarity index 100% rename from mindspore/lite/nnacl/base/batch_to_space_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/batch_to_space_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/batch_to_space_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/batch_to_space_base.h new file mode 100644 index 0000000000..6772482970 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/batch_to_space_base.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_BATCH_TO_SPACE_BASE_H_ +#define MINDSPORE_NNACL_BATCH_TO_SPACE_BASE_H_ + +#include +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void BatchToSpaceNoCropForNHWC(const void *input, void *output, const int *in_shape, int out_n, const int *block, + int data_size); +void BatchToSpaceForNHWC(const void *input, void *output, const int *in_shape, int out_n, const int *block, + const int *crops, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_BATCH_TO_SPACE_BASE_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/cast_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/cast_base.h new file mode 100644 index 0000000000..caecc7e929 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/cast_base.h @@ -0,0 +1,104 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CAST_BASE_H_ +#define MINDSPORE_NNACL_CAST_BASE_H_ + +#include "nnacl/op_base.h" +#include "nnacl/nnacl_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +inline void BoolToFloat32(const bool *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float)input[i]; + } +} + +inline void Uint8ToFloat32(const uint8_t *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float)input[i]; + } +} + +inline void Int32ToFloat32(const int32_t *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float)input[i]; + } +} + +inline void Int64ToFloat32(const int64_t *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float)input[i]; + } +} + +#ifdef ENABLE_FP16 +inline void Int64ToFp16(const int64_t *input, float16_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float16_t)input[i]; + } +} +#endif + +inline void Fp16ToFloat32(const uint16_t *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = ShortToFloat32(input[i]); + } +} + +inline void Float32ToFp16(const float *input, uint16_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = Float32ToShort(input[i]); + } +} + +inline void Float32ToInt32(const float *input, int32_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int32_t)input[i]; + } +} + +inline void Float32ToInt64(const float *input, int64_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int64_t)input[i]; + } +} + +inline void Int32ToInt64(const int32_t *input, int64_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int64_t)input[i]; + } +} + +inline void Float32ToInt16(const float *input, int16_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int16_t)input[i]; + } +} + +inline void BoolToInt32(const bool 
*input, int32_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int32_t)input[i]; + } +} + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_CAST_BASE_H_ diff --git a/mindspore/lite/nnacl/base/concat_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/concat_base.c similarity index 100% rename from mindspore/lite/nnacl/base/concat_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/concat_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/concat_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/concat_base.h new file mode 100644 index 0000000000..6fefcabd3e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/concat_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_CONCAT_BASE_H_ +#define MINDSPORE_NNACL_FP32_CONCAT_BASE_H_ + +#include +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Concat(void **input, int input_num, int axis, int **inputs_output_shape, size_t shape_size, void *output, + int task_id, int thread_num, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_CONCAT_BASE_H_ diff --git a/mindspore/lite/nnacl/base/conv1x1_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/conv1x1_base.c similarity index 100% rename from mindspore/lite/nnacl/base/conv1x1_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/conv1x1_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/conv1x1_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/conv1x1_base.h new file mode 100644 index 0000000000..e6e5cefed8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/conv1x1_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_BASE_CONV1X1_BASE_H_ +#define MINDSPORE_NNACL_BASE_CONV1X1_BASE_H_ + +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void Conv1x1InputPack(const void *src_ptr, void *dst_ptr, ConvParameter *conv_param, int data_size); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_BASE_CONV1X1_BASE_H_ diff --git a/mindspore/lite/nnacl/base/depth_to_space_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/depth_to_space_base.c similarity index 100% rename from mindspore/lite/nnacl/base/depth_to_space_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/depth_to_space_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/depth_to_space_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/depth_to_space_base.h new file mode 100644 index 0000000000..729959b645 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/depth_to_space_base.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_DEPTH_TO_SPACE_H_ +#define MINDSPORE_NNACL_DEPTH_TO_SPACE_H_ + +#include +#include "nnacl/depth_to_space_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DepthToSpaceForNHWC(const void *input, void *output, const int *in_shape, const DepthToSpaceParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_DEPTH_TO_SPACE_H_ diff --git a/mindspore/lite/nnacl/base/fill_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/fill_base.c similarity index 100% rename from mindspore/lite/nnacl/base/fill_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/fill_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/fill_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/fill_base.h new file mode 100644 index 0000000000..caf3ebfc19 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/fill_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FILL_BASE_H_ +#define MINDSPORE_NNACL_FILL_BASE_H_ + +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/fill_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int FillFp32(float *output, int size, float data); +int FillInt32(int *output, int size, int data); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FILL_BASE_H_ diff --git a/mindspore/lite/nnacl/base/gather_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/gather_base.c similarity index 100% rename from mindspore/lite/nnacl/base/gather_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/gather_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/gather_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/gather_base.h new file mode 100644 index 0000000000..f028af5c74 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/gather_base.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_GATHER_BASE_H_ +#define MINDSPORE_NNACL_GATHER_BASE_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif +int Gather(const void *input, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, + void *output, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_GATHER_BASE_H_ diff --git a/mindspore/lite/nnacl/base/minimal_filtering_generator.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/minimal_filtering_generator.c similarity index 100% rename from mindspore/lite/nnacl/base/minimal_filtering_generator.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/minimal_filtering_generator.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/minimal_filtering_generator.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/minimal_filtering_generator.h new file mode 100644 index 0000000000..a72f98d54f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/minimal_filtering_generator.h @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_MINIMAL_FILTERING_GENERATOR_H_ +#define MINDSPORE_NNACL_MINIMAL_FILTERING_GENERATOR_H_ + +#ifdef ENABLE_ARM +#include +#endif +#include +#include "nnacl/pack.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Polynomial(const float *interval, float *m, int degree); + +void DiagonalPlusMatrix(const float *matrix, float *diagonal_matrix, int degree); + +void ResidueMatrix(const float *interval, float *b, int row, int col); + +int LT(const float *poly_array, float *matrix_lt, int n); + +void T(const float *poly_array, float *matrix_t, int n); + +int B(const float *poly_array, float *matrix_b, int in_unit); + +void GenerateIntervalArray(float *array, float interval, int degree); + +void MatrixTranspose(const float *matrix, float *trans_matrix, int row, int col); + +void MatrixMultiply(const float *matrix_a, const float *matrix_b, float *matrix_c, int m, int k, int n); + +int CookToomFilter(float *matrix_a, float *matrix_at, float *matrix_b, float *matrix_bt, float *matrix_g, + float *matrix_gt, float coefficient, int out_unit, int filter_size); +void MatrixMultiplyWinograd(const float *matix_a, const float *matrix_b, float *matrix_c, int m, int k, int n, + int in_channel, int c4_channel); + +int WinogradWeightTransform(const float *weight_data, float *winograd_data, float *matrix_g, const float *matrix_gt, + int oc_block, int input_unit_, int kernel_unit_, int channel, int batch, bool pack); + +#if defined(ENABLE_ARM) || defined(ENABLE_SSE) +void MatrixMultiplyVec(const MS_FLOAT32X4 *matrix_a, const MS_FLOAT32X4 *matrix_b, MS_FLOAT32X4 *matrix_c, + const float *bias, int m, int k, int n); +#endif +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_MINIMAL_FILTERING_GENERATOR_H_ diff --git a/mindspore/lite/nnacl/base/slice_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/slice_base.c similarity index 100% rename from mindspore/lite/nnacl/base/slice_base.c rename to 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/slice_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/slice_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/slice_base.h new file mode 100644 index 0000000000..ab2b876ef6 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/slice_base.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_BASE_SLICE_BASE_H_ +#define MINDSPORE_NNACL_BASE_SLICE_BASE_H_ + +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void PadSliceParameterTo4D(SliceParameter *param); + +void DoSlice(const void *input, void *output, SliceParameter *param, int thread_id, int data_size); +void DoSliceNoParallel(const void *input, void *output, SliceParameter *param, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_BASE_SLICE_BASE_H_ diff --git a/mindspore/lite/nnacl/base/space_to_depth_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/space_to_depth_base.c similarity index 100% rename from mindspore/lite/nnacl/base/space_to_depth_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/space_to_depth_base.c diff --git a/mindspore/lite/nnacl/base/space_to_depth_base.h 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/space_to_depth_base.h similarity index 100% rename from mindspore/lite/nnacl/base/space_to_depth_base.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/space_to_depth_base.h diff --git a/mindspore/lite/nnacl/base/split_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/split_base.c similarity index 100% rename from mindspore/lite/nnacl/base/split_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/split_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/split_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/split_base.h new file mode 100644 index 0000000000..c6b554ae6a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/split_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_NNACL_SPLIT_BASE_H_ +#define MINDSPORE_NNACL_NNACL_SPLIT_BASE_H_ + +#include "nnacl/op_base.h" +#include "nnacl/split_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DoSplit(void *in_data, void **out_data, const int *input_shape, int offset, int num_unit, + SplitParameter *split_param, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_NNACL_SPLIT_BASE_H_ diff --git a/mindspore/lite/nnacl/base/stack_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/stack_base.c similarity index 100% rename from mindspore/lite/nnacl/base/stack_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/stack_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/stack_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/stack_base.h new file mode 100644 index 0000000000..83c364c90f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/stack_base.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_STACK_H_ +#define MINDSPORE_NNACL_STACK_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/stack_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Stack(char **inputs, char *output, size_t input_num, size_t copy_size, size_t outter_size); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_STACK_H_ diff --git a/mindspore/lite/nnacl/base/tile_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/tile_base.c similarity index 100% rename from mindspore/lite/nnacl/base/tile_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/tile_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/tile_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/tile_base.h new file mode 100644 index 0000000000..0af167e93b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/tile_base.h @@ -0,0 +1,53 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_BASE_TILE_H_ +#define MINDSPORE_NNACL_BASE_TILE_H_ + +#include "nnacl/op_base.h" + +typedef struct TileParameter { + // primitive parameter + OpParameter op_parameter_; + int multiples_[5]; + int dims_[5]; + size_t dims_size_; + size_t multiples_size_; + + // shape correlative + int in_shape_[5]; + int out_shape_[5]; + int in_strides_[5]; + int out_strides_[5]; + + // other parameter + int in_dim_; + size_t data_size_; + size_t fast_outer_size_; + size_t fast_stride_; + size_t fast_multiple_; +} TileParameter; + +#ifdef __cplusplus +extern "C" { +#endif +void Tile(void *input_data, void *output_data, TileParameter *parameter); +void TileSimple(void *input_data, void *output_data, size_t begin, size_t end, TileParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_BASE_TILE_H_ diff --git a/mindspore/lite/nnacl/base/unstack_base.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/unstack_base.c similarity index 100% rename from mindspore/lite/nnacl/base/unstack_base.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/unstack_base.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/unstack_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/unstack_base.h new file mode 100644 index 0000000000..d4915a4823 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/unstack_base.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_UNSTACK_H_ +#define MINDSPORE_NNACL_UNSTACK_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/unstack_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Unstack(const void *input, void **output, UnstackParameter *para, int data_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_UNSTACK_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/zeroslike_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/zeroslike_base.h new file mode 100644 index 0000000000..333aa8d806 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/zeroslike_base.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ZEROSLIKE_BASE_H_ +#define MINDSPORE_NNACL_ZEROSLIKE_BASE_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void ApproximateZerosLike(void *output, int number, int data_size) { + memset(output, 0.0, number * data_size); + return; +} + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_ZEROSLIKE_BASE_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/batch_to_space.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/batch_to_space.h new file mode 100644 index 0000000000..646792b614 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/batch_to_space.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_BATCH_TO_SPACE_H_ +#define MINDSPORE_NNACL_BATCH_TO_SPACE_H_ + +#include +#include "nnacl/op_base.h" + +#define BATCH_TO_SPACE_BLOCK_SHAPE_SIZE 2 + +typedef struct BatchToSpaceParameter { + OpParameter op_parameter_; + int32_t block_shape_[BATCH_TO_SPACE_BLOCK_SHAPE_SIZE]; + int32_t crops_[COMM_SHAPE_SIZE]; + bool no_crop_; +} BatchToSpaceParameter; + +#endif // MINDSPORE_NNACL_FP32_BATCH_TO_SPACE_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/batchnorm_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/batchnorm_parameter.h new file mode 100644 index 0000000000..2b2cef70ce --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/batchnorm_parameter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_BATCHNORM_PARAMETER_H_ +#define MINDSPORE_NNACL_BATCHNORM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct BatchNormParameter { + OpParameter op_parameter_; + float epsilon_; + float momentum_; + int unit_; + int units_; + int channel_; + bool fused_; +} BatchNormParameter; + +#endif // MINDSPORE_NNACL_BATCHNORM_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/broadcast_to_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/broadcast_to_parameter.h new file mode 100644 index 0000000000..074dbb9111 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/broadcast_to_parameter.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_BROADCAST_TO_PARAMETER_H_ +#define MINDSPORE_NNACL_FP32_BROADCAST_TO_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct BroadcastToParameter { + OpParameter op_parameter_; + int shape_[COMM_SHAPE_SIZE]; + size_t shape_size_; +} BroadcastToParameter; + +typedef struct BroadcastShapeInfo { + int input_shape_[COMM_SHAPE_SIZE]; + int input_shape_size_; + int output_shape_[COMM_SHAPE_SIZE]; + int output_shape_size_; +} BroadcastShapeInfo; + +#endif // MINDSPORE_NNACL_FP32_BROADCAST_TO_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/cast_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/cast_parameter.h new file mode 100644 index 0000000000..7a1e1f96c0 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/cast_parameter.h @@ -0,0 +1,27 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CAST_PARAMETER_H_ +#define MINDSPORE_NNACL_CAST_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct CastParameter { + OpParameter op_parameter_; + int dst_type_; + int src_type_; +} CastParameter; + +#endif // MINDSPORE_NNACL_CAST_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/common_func.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/common_func.c similarity index 100% rename from mindspore/lite/nnacl/common_func.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/common_func.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/common_func.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/common_func.h new file mode 100644 index 0000000000..f7ca4f0b2c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/common_func.h @@ -0,0 +1,77 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_COMMON_FUNC_H_ +#define MINDSPORE_NNACL_COMMON_FUNC_H_ + +#include <stdint.h> +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/nnacl_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int8_t MinInt8(int8_t a, int8_t b); +int8_t MaxInt8(int8_t a, int8_t b); +void ReluFp32(float *data, float *dst, int ele_num); +void Relu6Fp32(float *data, float *dst, int ele_num); +#ifdef ENABLE_AVX +#ifdef WIN32 +void ReluFp32C8(float *data, float *dst, int ele_num); +void Relu6Fp32C8(float *data, float *dst, int ele_num); +#endif +#endif +int offset(const int *shape, const int dim0, const int dim1, const int dim2, const int dim3); +int offsetComm(const int *shape, const int dim0, const int dim1, const int dim2); +int offset4d(const int *shape, const int *dims); + +static inline bool isAddOverflow(int32_t x, int32_t y) { + int32_t sum = x + y; + return (x > 0 && y > 0 && sum < 0) || (x < 0 && y < 0 && sum > 0); +} + +static inline bool isMulOverflow(int32_t x, int32_t y) { + int32_t p = x * y; + return (x != 0) && (p / x != y); +} + +static inline int GetStride(int *strides, const int *shape, int length) { + if (length <= 0) { + return 1; + } + int stride = 1; + for (int i = length - 1; i >= 0; --i) { + strides[i] = stride; + stride *= shape[i]; + } + return stride; +} + +#ifdef ENABLE_ARM64 +void BiasAdd(const float *bias, float *data, size_t oc4, size_t plan_size); +void BiasAddRelu6(const float *bias, float *data, size_t oc4, size_t plan_size); +void BiasAddRelu(const float *bias, float *data, size_t oc4, size_t plan_size); +void Relu6(float *data, size_t element4); +void Relu(float *data, size_t element4); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* MINDSPORE_NNACL_COMMON_FUNC_H_ */ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/concat_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/concat_parameter.h new file mode 100644 index 0000000000..a09201aa7c --- /dev/null +++
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/concat_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_CONCAT_PARAMETER_H_ +#define MINDSPORE_NNACL_CONCAT_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +typedef struct ConcatParameter { + OpParameter op_parameter_; + ConcatQuantArg quant_arg_; + int axis_; + int thread_count_; + int input_num_; + int **input_shapes_; + int *output_shapes_; + int64_t after_axis_size; + int64_t count_unit_; +} ConcatParameter; + +#endif // MINDSPORE_NNACL_CONCAT_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/constant_of_shape_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/constant_of_shape_parameter.h new file mode 100644 index 0000000000..ec41dafd11 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/constant_of_shape_parameter.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_CONSTANT_OF_SHAPE_PARAMETER_H_ +#define MINDSPORE_NNACL_CONSTANT_OF_SHAPE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct ConstantOfShapeParameter { + OpParameter op_parameter_; + union value_ { + float f32_value_; + int32_t int32_value_; + } value_; + int data_type_; + int element_size_; +} ConstantOfShapeParameter; + +#endif // MINDSPORE_NNACL_CONSTANT_OF_SHAPE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/conv_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/conv_parameter.h new file mode 100644 index 0000000000..bb322add22 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/conv_parameter.h @@ -0,0 +1,131 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_CONV_PARAMETER_H_ +#define MINDSPORE_NNACL_CONV_PARAMETER_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +typedef struct ConvParameter { + OpParameter op_parameter_; + ConvQuantArg conv_quant_arg_; + int kernel_h_; + int kernel_w_; + int stride_h_; + int stride_w_; + int dilation_h_; + int dilation_w_; + int pad_u_; + int pad_d_; + int pad_l_; + int pad_r_; + int group_; + int tile_num_; + int input_batch_; + int input_h_; + int input_w_; + int input_channel_; + int output_batch_; + int output_h_; + int output_w_; + int output_channel_; + int thread_num_; + int input_unit_; + int output_unit_; + PadMode pad_mode_; + ActType act_type_; + int channel_multiplie_; + int output_padding_w_; + int output_padding_h_; +} ConvParameter; + +typedef struct SlidingWindowParam { + int left_; + int right_; + int top_; + int bottom_; + int c_block_; + int block_channel_; + int ic4_channel_; + int out_step_; + int out_h_step_; + int in_step_; + int in_h_step_; + int in_sh_step_;  // stride H + int in_sw_step_;  // stride W + int in_kh_step_;  // kernel H + int in_kw_step_;  // kernel W + int kernel_step_; +} SlidingWindowParam; + +#define OUPUT_UNIT 2 +#define DECONV_WINOGRAD_DEFAULT_UNIT 3 +#define DECONV_WINOGRAD_DEFAULT_TILE 8 +#define DECONV_WINOGRAD_BUFFER_COUNT 8 +typedef struct DeConvWg { + void *b_buffer_; + void *AT_; + void *BT_; + + int kh_; + int kw_; + + int k_; + int i_; + int o_; +} DeConvWg; + +typedef struct DeConvWgABuffer { + bool buf_init_; + void *middle_buffer_; + void *dest_buffer_; +} DeConvWgABuffer; + +typedef struct DeConvComputeUnit { + void *weight_; + void *tmp_buffer_; + int w_start_; + int h_start_; + int w_size_; + int h_size_; + bool use_winograd_; + DeConvWg winograd_; +} DeConvComputeUnit; + +typedef struct DeConvParam { + DeConvComputeUnit *compute_units_; + int compute_size_; + DeConvWgABuffer a_buffer_[DECONV_WINOGRAD_BUFFER_COUNT]; + int input_plane_; +
int output_plane_; + int kernel_plane_; + int ic_div4_; + int oc_div4_; + int ic_up4_; + int oc_up4_; + int thread_num_; + int in_tile_count_; + int in_tile_h_count_; + int in_tile_w_count_; + int out_tile_h_; + int out_tile_w_; +} DeConvParam; + +#endif // MINDSPORE_NNACL_CONV_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/crop_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/crop_parameter.h new file mode 100644 index 0000000000..c9f8113df5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/crop_parameter.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_CROP_PARAMETER_H_ +#define MINDSPORE_NNACL_CROP_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +typedef struct CropParameter { + OpParameter op_parameter_; + CropQuantArg quant_arg; + int thread_count_; + int offset_size_; + int64_t offset_[COMM_SHAPE_SIZE]; + int64_t in_offset_[COMM_SHAPE_SIZE]; + int64_t axis_; + int *in_shape_; + int *out_shape_; + int input_dim_; +} CropParameter; + +#endif // MINDSPORE_NNACL_CROP_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/depth_to_space_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/depth_to_space_parameter.h new file mode 100644 index 0000000000..f107c6177a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/depth_to_space_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_DEPTH_TO_SPACE_PARAMETER_H_ +#define MINDSPORE_NNACL_DEPTH_TO_SPACE_PARAMETER_H_ +#include "nnacl/op_base.h" + +typedef struct DepthToSpaceParameter { + OpParameter op_parameter_; + // primitive parameter + int32_t block_size_; + // shape correlative + int32_t in_stride_dim0_; + int32_t in_stride_dim1_; + int32_t in_stride_dim2_; + int32_t out_stride_dim0_; + int32_t out_stride_dim1_; + int32_t out_stride_dim2_; + // other parameter + uint8_t data_type_size_; +} DepthToSpaceParameter; + +#endif // MINDSPORE_NNACL_DEPTH_TO_SPACE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/detection_post_process_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/detection_post_process_parameter.h new file mode 100644 index 0000000000..bd8b00af32 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/detection_post_process_parameter.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_DETECTION_POST_PROCESS_PARAMETER_H_ +#define MINDSPORE_NNACL_DETECTION_POST_PROCESS_PARAMETER_H_ +#include "nnacl/op_base.h" + +typedef struct DetectionPostProcessParameter { + OpParameter op_parameter_; + float h_scale_; + float w_scale_; + float x_scale_; + float y_scale_; + float nms_iou_threshold_; + float nms_score_threshold_; + int64_t max_detections_; + int64_t detections_per_class_; + int64_t max_classes_per_detection_; + int64_t num_classes_; + bool use_regular_nms_; + bool out_quantized_; + + float *anchors_; + + void *decoded_boxes_; + void *nms_candidate_; + void *indexes_; + void *scores_; + void *all_class_indexes_; + void *all_class_scores_; + void *single_class_indexes_; + void *selected_; +} DetectionPostProcessParameter; + +#endif // MINDSPORE_NNACL_DETECTION_POST_PROCESS_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/errorcode.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/errorcode.h new file mode 100644 index 0000000000..18f93af9f8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/errorcode.h @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_ERRORCODE_H_ +#define MINDSPORE_NNACL_ERRORCODE_H_ + +typedef enum ErrorCodeCommonEnum { + NNACL_OK = 0, + NNACL_ERR = 1, + NNACL_NULL_PTR, + NNACL_PARAM_INVALID, + NNACL_INFER_INVALID, + NNACL_INPUT_TENSOR_ERROR, + NNACL_COMMON_END = 9999 +} ErrorCodeCommonEnum; + +typedef enum ErrorCodeFp32OpEnum { + NNACL_ERRCODE_OP_FP32_START = 10000, + NNACL_ERRCODE_STRASSEN_RECURSION_MALLOC, + NNACL_ERRCODE_REVERSE_MALLOC, + NNACL_ERRCODE_SQRT_NEGATIVE, + NNACL_ERRCODE_RSQRT_NEGATIVE, + NNACL_ERRCODE_RSQRT_NEGATIVE_OR_ZERO, + NNACL_ERRCODE_LOG_NEGATIVE_OR_ZERO, + NNACL_ERRCODE_DIVISOR_ZERO, + NNACL_ERRCODE_INDEX_OUT_OF_RANGE, + NNACL_ERRCODE_WINOGRAD_GENERATOR_ERROR, + NNACL_ERRCODE_OP_FP32_END = 19999 +} ErrorCodeFp32OpEnum; + +typedef enum ErrorCodeFp16OpEnum { + NNACL_ERRCODE_OP_FP16_START = 20000, + NNACL_ERRCODE_OP_FP16_WINOGRAD_GENERATOR, + NNACL_ERRCODE_OP_FP16_END = 29999 +} ErrorCodeFp16OpEnum; + +typedef enum ErrorCodeUint8OpEnum { + NNACL_ERRCODE_OP_UINT8_START = 30000, + NNACL_ERRCODE_OP_UINT8_END = 39999 +} ErrorCodeUint8OpEnum; + +typedef enum ErrorCodeInt8OpEnum { + NNACL_ERRCODE_OP_INT8_START = 40000, + NNACL_ERRCODE_ADD_OVERFLOW, + NNACL_ERRCODE_MUL_OVERFLOW, + NNACL_ERRCODE_OP_INT8_END = 49999 +} ErrorCodeInt8OpEnums; + +#endif // MINDSPORE_NNACL_ERRORCODE_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fill_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fill_parameter.h new file mode 100644 index 0000000000..0b57d2c4a4 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fill_parameter.h @@ -0,0 +1,28 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FILL_PARAMETER_H_ +#define MINDSPORE_NNACL_FILL_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct FillParameter { + // Primitive parameter + OpParameter op_parameter_; + int dims_[COMM_SHAPE_SIZE]; + int num_dims_; +} FillParameter; + +#endif // MINDSPORE_NNACL_FILL_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/fp16/activation_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/activation_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/activation_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/activation_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/activation_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/activation_fp16.h new file mode 100644 index 0000000000..e287962602 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/activation_fp16.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP16_ACTIVATION_FP16_H_ +#define MINDSPORE_NNACL_FP16_ACTIVATION_FP16_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include <math.h> +#include "nnacl/op_base.h" +#include "nnacl/int8/fixed_point.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ReluFp16(const float16_t *src, float16_t *dst, int ele_num); +int Relu6Fp16(const float16_t *data, float16_t *dst, int ele_num); +int LReluFp16(const float16_t *src, float16_t *dst, int ele_num, float16_t alpha); +int SigmoidFp16(const float16_t *src, float16_t *dst, int ele_num); +int TanhFp16(const float16_t *src, float16_t *dst, int ele_num); +int HSwishFp16(const float16_t *src, float16_t *dst, int ele_num); +int SwishFp16(const float16_t *src, float16_t *dst, int ele_num); +int HardTanhFp16(const float16_t *src, int length, float16_t *dst, float min_val, float max_val); +int GeluFp16(const float16_t *src, int length, float16_t *dst, bool approximate); +#ifdef __cplusplus +} +#endif +#endif  // MINDSPORE_NNACL_FP16_ACTIVATION_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/arg_min_max_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arg_min_max_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/arg_min_max_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arg_min_max_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arg_min_max_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arg_min_max_fp16.h new file mode 100644 index 0000000000..c762b2b905 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arg_min_max_fp16.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP16_ARG_MIN_MAX_H_ +#define MINDSPORE_NNACL_FP16_ARG_MIN_MAX_H_ + +#include <float.h> +#include "nnacl/arg_min_max_parameter.h" +#include "nnacl/nnacl_common.h" + +#ifdef __cplusplus +extern "C" { +#endif +void ArgMinMaxFp16(const float16_t *input, void *output, float16_t *output_value, const int *in_shape, + const ArgMinMaxParameter *param); +#ifdef __cplusplus +} +#endif + +#endif  // MINDSPORE_NNACL_FP16_ARG_MIN_MAX_H_ diff --git a/mindspore/lite/nnacl/fp16/arithmetic_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/arithmetic_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_fp16.h new file mode 100644 index 0000000000..e500aaccd4 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_fp16.h @@ -0,0 +1,126 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP16_ARITHMETIC_FP16_H_ +#define MINDSPORE_NNACL_FP16_ARITHMETIC_FP16_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void TileOneDimensionFp16(const float16_t *inData, float16_t *outData, int dim, size_t ndim, const int *inShape, + const int *inStrides, const int *outStrides, const int *multiple); +void TileDimensionsFp16(const float16_t *data0, const float16_t *data1, float16_t *tile_data0, float16_t *tile_data1, + ArithmeticParameter *param); + +int ElementOptMulFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptMulReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptMulRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptAddFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptAddReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptAddRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptSubFp16(const float16_t *input0, const float16_t *input1, float16_t
*output, int element_size, + ArithmeticParameter *param); +int ElementOptSubReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptSubRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptDivFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptDivReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptDivRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptFloorModFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptFloorDivFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptLogicalAndFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptLogicalOrFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptSquaredDifferenceFp16(const float16_t *input0, const float16_t *input1, float16_t *output, + int element_size, ArithmeticParameter *param); +int ElementOptMaximumFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptMinimumFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptNotEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptEqualFp16(const float16_t 
*input0, const float16_t *input1, uint8_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptLessFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptLessEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptGreaterFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, + ArithmeticParameter *param); +int ElementOptGreaterEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, + ArithmeticParameter *param); + +int ElementMulFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementMulReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementMulRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); + +int ElementAddFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementAddReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementAddRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int BroadcastAddFp16(const float16_t *in0, const float16_t *in1, float16_t *tile_in0, float16_t *tile_in1, + float16_t *out, int size, ArithmeticParameter *param); + +int ElementSubFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementSubReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementSubRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); + +int ElementDivFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementDivReluFp16(const float16_t 
*input0, const float16_t *input1, float16_t *output, int element_size); +int ElementDivRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); + +int ElementFloorModFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementFloorDivFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); + +int ElementLogicalAndFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementLogicalOrFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); + +int ElementSquaredDifferenceFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); + +int ElementMaximumFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); +int ElementMinimumFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); + +int ElementNotEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); +int ElementEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); +int ElementLessFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); +int ElementLessEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); +int ElementGreaterFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); +int ElementGreaterEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_ARITHMETIC_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/arithmetic_self_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_self_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/arithmetic_self_fp16.c rename to 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_self_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_self_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_self_fp16.h new file mode 100644 index 0000000000..3b94e10406 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/arithmetic_self_fp16.h @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP16_ARITHMETIC_SELF_FP16_H_ +#define MINDSPORE_NNACL_FP16_ARITHMETIC_SELF_FP16_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ElementAbsFp16(float16_t *input, float16_t *output, int element_size); + +int ElementCosFp16(float16_t *input, float16_t *output, int element_size); + +int ElementLogFp16(float16_t *input, float16_t *output, int element_size); + +int ElementSquareFp16(float16_t *input, float16_t *output, int element_size); + +int ElementSqrtFp16(float16_t *input, float16_t *output, int element_size); + +int ElementRsqrtFp16(float16_t *input, float16_t *output, int element_size); + +int ElementSinFp16(float16_t *input, float16_t *output, int element_size); + +int ElementLogicalNotFp16(float16_t *input, float16_t *output, int element_size); + +int ElementRoundFp16(float16_t *input, float16_t *output, int element_size); + +int ElementFloorFp16(float16_t *input, float16_t *output, int element_size); + +int ElementCeilFp16(float16_t *input, float16_t *output, int number); + +int ElementNegativeFp16(float16_t *input, float16_t *output, int element_size); + +int ElementReciprocalFp16(float16_t *input, float16_t *output, int element_size); + +int ElementErfFp16(float16_t *input, float16_t *output, int element_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_ARITHMETIC_SELF_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/batchnorm_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/batchnorm_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/batchnorm_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/batchnorm_fp16.c diff --git a/mindspore/lite/nnacl/fp16/batchnorm_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/batchnorm_fp16.h similarity index 100% rename from mindspore/lite/nnacl/fp16/batchnorm_fp16.h rename to 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/batchnorm_fp16.h diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/cast_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/cast_fp16.h new file mode 100644 index 0000000000..d7136cd1ae --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/cast_fp16.h @@ -0,0 +1,65 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_CAST_FP16_H_ +#define MINDSPORE_NNACL_CAST_FP16_H_ + +#include +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +inline void BoolToFloat16(const bool *input, float16_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float16_t)input[i]; + } +} + +inline void Uint8ToFloat16(const uint8_t *input, float16_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float16_t)input[i]; + } +} + +inline void Float16ToInt32(const float16_t *input, int32_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int32_t)input[i]; + } +} + +inline void Float16ToInt64(const float16_t *input, int64_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (int64_t)input[i]; + } +} + +inline void Float32ToFloat16(const float *input, float16_t *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float16_t)input[i]; + } +} + +inline void 
Float16ToFloat32(const float16_t *input, float *output, int number) { + for (int i = 0; i < number; ++i) { + output[i] = (float)input[i]; + } +} + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CAST_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/common_func_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/common_func_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/common_func_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/common_func_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/common_func_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/common_func_fp16.h new file mode 100644 index 0000000000..b6abd3e267 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/common_func_fp16.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP16_COMMON_FUNC_FP16_H_ +#define MINDSPORE_NNACL_FP16_COMMON_FUNC_FP16_H_ + +#include +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* deconv common */ +void PostConvFuncFp16C8(const float16_t *c8_out_ptr, float16_t *out_ptr, const float16_t *bias_ptr, + size_t output_channel, size_t plane_size, size_t stride, ActType act_type); +void PostFuncBiasReluC8Fp16(float16_t *dst, const float16_t *src, const float16_t *bias, size_t oc8div, size_t oc8mod, + size_t plane_size, size_t stride, size_t relu_type); + +/* deconv winograd */ +void PostConvFuncFp16C4(const float16_t *c4_out, float16_t *nhwc_out, const float16_t *bias, size_t output_channel, + size_t plane_size, size_t plane_stride, ActType act_type); +void PostFuncBiasReluC4Fp16(float16_t *dst, const float16_t *src, const float16_t *bias, size_t oc4div, size_t oc4mod, + size_t plane_size, size_t plane_stride, size_t relu_type); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FP16_COMMON_FUNC_FP16_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/constant_of_shape_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/constant_of_shape_fp16.h new file mode 100644 index 0000000000..6c42506a46 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/constant_of_shape_fp16.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP16_CONSTANT_OF_SHAPE_FP16_H_ +#define MINDSPORE_NNACL_FP16_CONSTANT_OF_SHAPE_FP16_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/constant_of_shape_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +#ifdef __cplusplus +#ifdef ENABLE_NEON +inline int ConstantOfShapeFp16(float16_t *output, int start, int end, float16_t value) { + for (int i = start; i < end; i++) { + output[i] = value; + } + return NNACL_OK; +} +#endif +} +#endif + +#endif // MINDSPORE_NNACL_FP16_CONSTANT_OF_SHAPE_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/conv_depthwise_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/conv_depthwise_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.h new file mode 100644 index 0000000000..80c5347186 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.h @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_CONV_DEPTHWISE_FP16_H_ +#define MINDSPORE_NNACL_FP16_CONV_DEPTHWISE_FP16_H_ + +#include "nnacl/conv_parameter.h" +#include "nnacl/fp32/conv_depthwise_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif +#ifdef ENABLE_ARM64 +void ConvDwFp16Row(float16_t *output_ptr, const float16_t *input_ptr, const float16_t *filter_ptr, size_t num_pixels, + size_t input_channel, size_t input_step); +void ConvDwFp16Border(float16_t *dst, const float16_t *src, const float16_t *weight, const float16_t *bias, + size_t height, size_t width, size_t in_kh_step, size_t in_kw_step, size_t kernel_w, size_t relu, + size_t relu6); +void ConvDwFp16Center(float16_t *dst, const float16_t *src, const float16_t *weight, const float16_t *bias, + size_t height, size_t width, size_t kernel_h, size_t kernel_w, size_t out_h_step, + size_t block_channel, size_t in_sh_step, size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, + size_t relu, size_t relu6); +void DeconvDwFp16Border(float16_t *dst, const float16_t *src, const float16_t *weight, size_t height, size_t width, + size_t in_kh_step, size_t in_kw_step, size_t kernel_w); +void DeconvDwFp16Center(float16_t *dst, const float16_t *src, const float16_t *weight, size_t height, size_t width, + size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, + size_t in_sw_step, size_t in_kh_step, size_t in_kw_step); +#endif + +void ConvDwFp16(float16_t *output_data, const float16_t *input_data, const float16_t *weight_data, + const float16_t *bias_data, const ConvParameter *conv_param, int task_id); + +void ConvDwC8Fp16(float16_t *output_data, const float16_t *input_data, const float16_t *weight_data, + const float16_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding, + int task_id); + +void DeconvDwC8Fp16(float16_t *output_data, const float16_t *input_data, const float16_t *weight_data, + const float16_t *bias_data, const ConvParameter *conv_param, const 
SlidingWindowParam *sliding, + int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_CONV_DEPTHWISE_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/conv_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/conv_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_fp16.h new file mode 100644 index 0000000000..8ecde2bc6f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_fp16.h @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP16_CONV_FP16_H_ +#define MINDSPORE_NNACL_FP16_CONV_FP16_H_ + +#include +#include "nnacl/conv_parameter.h" +#include "nnacl/fp16/winograd_utils_fp16.h" +#include "nnacl/fp16/winograd_transform_fp16.h" + +typedef float16_t *TmpBufferAddressFp16; +typedef float16_t *MatricesFp16; + +#ifndef ENABLE_NEON +void IndirectGemmFp16_16x8(float16_t *output, float16_t *input, float16_t *weight, float16_t *bias, size_t step, + size_t ic4, size_t oc8, size_t offset, size_t mode, size_t writeC8, size_t relu, + size_t relu6); + +void IndirectGemmFp16_16x8_common(float16_t *output, float16_t *input, float16_t *weight, float16_t *bias, size_t step, + size_t ic4, size_t oc8, size_t offset, size_t relu, size_t relu6); + +void IndirectGemmFp16_16x8_c8(float16_t *output, float16_t *input, float16_t *weight, float16_t *bias, size_t step, + size_t ic4, size_t oc8, size_t offset, size_t mode, size_t writeC8, size_t relu, + size_t relu6); +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// fp16 convolution common (im2col+gemm) +void ConvFp16(float16_t *input_data, float16_t *packed_input, float16_t *packed_weight, float16_t *bias_data, + float16_t *col_major_input, float16_t *output_data, int task_id, ConvParameter *conv_param); + +// fp16 convolution winograd +void ConvWinogardFp16(float16_t *input_data, float16_t *trans_weight, const float16_t *bias_data, + float16_t *output_data, TmpBufferAddressFp16 *buffer_list, int task_id, ConvParameter *conv_param, + InputTransFp16Func in_func, OutputTransFp16Func out_func); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_CONV_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/crop_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/crop_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/crop_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/crop_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/crop_fp16.h 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/crop_fp16.h new file mode 100644 index 0000000000..6efa3a9837 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/crop_fp16.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP16_CROP_FP16_H_ +#define MINDSPORE_NNACL_FP16_CROP_FP16_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/crop_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Fp16Crop(const float16_t *input, float16_t *output, int task_id, CropParameter *para); +void Fp16Crop1D(const float16_t *input, float16_t *output, int task_id, CropParameter *para); +void Fp16Crop2D(const float16_t *input, float16_t *output, int task_id, CropParameter *para); +void Fp16Crop3D(const float16_t *input, float16_t *output, int task_id, CropParameter *para); +void Fp16Crop4D(const float16_t *input, float16_t *output, int task_id, CropParameter *para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_CROP_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/deconv_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/deconv_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_fp16.h 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_fp16.h new file mode 100644 index 0000000000..b1de538b19 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_fp16.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP16_DECONV_FP16_H_ +#define MINDSPORE_NNACL_FP16_DECONV_FP16_H_ + +#include +#include +#include "nnacl/conv_parameter.h" +#include "nnacl/errorcode.h" +#include "nnacl/fp16/common_func_fp16.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DeConvPostFp16(const float16_t *src, float16_t *tmp, const float16_t *bias, float16_t *dst, int output_channel, + ConvParameter *conv_param); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FP16_DECONV_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/deconv_winograd_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_winograd_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/deconv_winograd_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_winograd_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_winograd_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_winograd_fp16.h new file mode 100644 index 0000000000..cfe9a40e5a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/deconv_winograd_fp16.h @@ -0,0 +1,48 @@ +/** 
+ * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP16_DECONV_WINOGRAD_FP16_H_ +#define MINDSPORE_NNACL_FP16_DECONV_WINOGRAD_FP16_H_ + +#include "nnacl/fp16/winograd_transform_fp16.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PackDeConvWgDataFp16(float16_t *nhwc_weight, DeConvComputeUnit *unit, ConvParameter *conv_param, + DeConvParam *deconv_param); + +void DeconvWgFp16(float16_t *nhwc_input_, float16_t *tile_in, float16_t *tile_out, int start_index, int calculate_count, + ConvParameter *conv_param, DeConvParam *deconv_param, int task_id); + +void DeconvWgPostFp16(float16_t *tile_out, float16_t *nc4hw4_output, ConvParameter *conv_param, + DeConvParam *deconv_param, int calculate_count, int tile_index); + +void TiledC4MatmulFp16(float16_t *dst, const float16_t *src, const float16_t *weight, size_t ic4, size_t cal_num, + size_t oc4); + +void WinogradTransLeftFp16(const float16_t *S, const float16_t *B, float16_t *M, size_t w, size_t h, size_t k, + size_t length); + +void WinogradTransRightFp16(const float16_t *S, const float16_t *B, float16_t *M, size_t w, size_t h, size_t k, + size_t length); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_DECONV_WINOGRAD_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/exp_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/exp_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/exp_fp16.c 
rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/exp_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/exp_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/exp_fp16.h new file mode 100644 index 0000000000..8bc71c3805 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/exp_fp16.h @@ -0,0 +1,70 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP16_EXP_H_ +#define MINDSPORE_NNACL_FP16_EXP_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void ExpFp16(const float16_t *src, float16_t *dst, int num); + +#if defined(ENABLE_ARM64) +static inline float32x4_t exp_fp32(float32x4_t input) { + static float32x4_t param[] = {{0.693147f, 0.693147f, 0.693147f, 0.693147f}, + {1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120}, + {1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24}, + {1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6}, + {0.5f, 0.5f, 0.5f, 0.5f}, + {1.0f, 1.0f, 1.0f, 1.0f}}; + int32x4_t integer = vcvtq_s32_f32(input / param[0]); + float32x4_t decimal = input - vcvtq_f32_s32(integer) * param[0]; + int32x4_t int_exp = vshlq_s32((integer + vmovq_n_s32(127)), vmovq_n_s32(23)); + float32x4_t decimal_exp = + param[5] + + decimal * (param[5] + decimal * (param[4] + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); + decimal_exp = decimal_exp * vld1q_f32((float *)(&int_exp)); + 
return decimal_exp; +} + +static inline void simd_exp_fp16(float16x8_t input, float16_t *dst) { + static float16x8_t maxv = {88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f}; + static float16x8_t minv = {-88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f}; + + input = vmaxq_f16(minv, vminq_f16(input, maxv)); + float32x4_t input_low = vcvt_f32_f16(vget_low_f16(input)); + float32x4_t input_high = vcvt_high_f32_f16(input); + vst1q_f16(dst, vcombine_f16(vcvt_f16_f32(exp_fp32(input_low)), vcvt_f16_f32(exp_fp32(input_high)))); +} +#endif + +static inline void single_exp_fp16(float16_t src, float16_t *dst) { + static float param[] = {0.693147f, 1.0f / 120, 1.0f / 24, 1.0f / 6, 1.0f / 2, 1.0f}; + src = MSMAX(-88.0f, MSMIN(88.0f, src)); + int integer = (float)src / param[0]; + float decimal = (float)src - integer * param[0]; + int int_exp = (integer + 127) << 23; + const float decimal_exp = + 1.0f + decimal * (1.0f + decimal * (0.5f + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); + *dst = (float16_t)(*((float *)&int_exp) * decimal_exp); +} +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_EXP_H_ diff --git a/mindspore/lite/nnacl/fp16/gru_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/gru_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/gru_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/gru_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/gru_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/gru_fp16.h new file mode 100644 index 0000000000..612532147d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/gru_fp16.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP16_GRU_H_ +#define MINDSPORE_NNACL_FP16_GRU_H_ +#include "nnacl/gru_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void GruFp16(float16_t *output, const float16_t *input, const float16_t *weight_g, const float16_t *weight_r, + const float16_t *input_bias, const float16_t *state_bias, float16_t *hidden_state, float16_t *buffer[4], + int check_seq_len, const GruParameter *gru_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_GRU_H_ diff --git a/mindspore/lite/nnacl/fp16/instance_norm_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/instance_norm_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/instance_norm_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/instance_norm_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/instance_norm_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/instance_norm_fp16.h new file mode 100644 index 0000000000..e22bd88519 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/instance_norm_fp16.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP16_INSTANCE_NORM_H_ +#define MINDSPORE_NNACL_FP16_INSTANCE_NORM_H_ + +#include "nnacl/instance_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int InstanceNormFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *gamma_data, + const float16_t *beta_data, const InstanceNormParameter *param, size_t task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_INSTANCE_NORM_H_ diff --git a/mindspore/lite/nnacl/fp16/log_softmax_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/log_softmax_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/log_softmax_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/log_softmax_fp16.c diff --git a/mindspore/lite/nnacl/fp16/log_softmax_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/log_softmax_fp16.h similarity index 100% rename from mindspore/lite/nnacl/fp16/log_softmax_fp16.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/log_softmax_fp16.h diff --git a/mindspore/lite/nnacl/fp16/lstm_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/lstm_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/lstm_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/lstm_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/lstm_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/lstm_fp16.h new file mode 100644 index 0000000000..5972ae9156 --- /dev/null +++ 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/lstm_fp16.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP16_LSTM_H_ +#define MINDSPORE_NNACL_FP16_LSTM_H_ + +#include "nnacl/lstm_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif +void PackLstmWeightFp32ToFp16(float16_t *dst, const float *src, int batch, int deep, int col, int col_align); + +void PackLstmWeightFp16(float16_t *dst, const float16_t *src, int batch, int deep, int col, int col_align); + +void PackLstmBiasFp32ToFp16(float16_t *dst, const float *src, int batch, int col, int col_align, bool is_bidirectional); + +void PackLstmBiasFp16(float16_t *dst, const float16_t *src, int batch, int col, int col_align, bool is_bidirectional); + +void LstmMatMulFp16(float16_t *c, const float16_t *a, const float16_t *b, const float16_t *bias, int row, int deep, + int col, bool is_vec); + +void MatMulAccFp16(float16_t *output, const float16_t *input, const float16_t *weight, int rows, int cols, + int inner_size); + +void ElementMulAccFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); + +int ElementOptMulAccFp16(const float16_t *input0, const float16_t input1, float16_t *output, const int element_size); + +void LstmFp16(float16_t *output, const float16_t *input, const float16_t *weight_i, const float16_t *weight_h, + const float16_t *input_bias, const 
float16_t *state_bias, float16_t *hidden_state, float16_t *cell_state, + float16_t *buffer[6], const LstmParameter *lstm_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_LSTM_H_ diff --git a/mindspore/lite/nnacl/fp16/matmul_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matmul_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/matmul_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matmul_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matmul_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matmul_fp16.h new file mode 100644 index 0000000000..113553ef59 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matmul_fp16.h @@ -0,0 +1,68 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_MATMUL_H_ +#define MINDSPORE_NNACL_FP16_MATMUL_H_ + +#include +#include +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/errorcode.h" +#include "nnacl/matmul_parameter.h" +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void MatMul16x8(const float16_t *a, const float16_t *b, float16_t *dst, const float16_t *bias, ActType act_type, + int deep, int row, int col, int stride, bool write_nhwc); + +void MatMulFp16(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, ActType act_type, + int depth, int row, int col, int stride, int out_type); + +void MatVecMulFp16(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, ActType act_type, + int depth, int col); + +void ColMajor2Row8MajorFp16(const void *src_ptr, float16_t *dst_ptr, size_t row, size_t col, bool src_float16); + +void RowMajor2Col16MajorFp16Opt(const float16_t *src_ptr, float16_t *dst_ptr, size_t row, size_t col); + +void MatmulFp16Neon64(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, int act_type, + size_t depth, size_t row, size_t col, size_t stride, bool write_nhwc); + +void MatmulFp16Neon64Opt(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, int act_type, + size_t depth, size_t row, size_t col, size_t stride, size_t write_nhwc); + +void MatVecMulFp16Neon64(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, int act_type, + int depth, int col); + +void RowMajor2Col16MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); + +void RowMajor2Row16MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); + +void RowMajor2Row8MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); + +void RowMajor2Col8MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); + +void RowMajor2ColMajorFp16(const void *src, float16_t *dst, int row, int 
col, bool is_fp32_src); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_MATMUL_H_ diff --git a/mindspore/lite/nnacl/fp16/matrix_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matrix_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/matrix_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matrix_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matrix_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matrix_fp16.h new file mode 100644 index 0000000000..b554440c4e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/matrix_fp16.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_MATRIX_FP16_H_ +#define MINDSPORE_NNACL_FP16_MATRIX_FP16_H_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif +void MatrixMultiplyFp16(const float16_t *matrix_a, const float16_t *matrix_b, float16_t *matrix_c, int m, int k, int n); + +void MatrixMultiplyVecFp16(const float16x8_t *matrix_a, const float16x8_t *matrix_b, float16x8_t *matrix_c, + const float16_t *bias, int m, int k, int n); +void MatrixMultiplyWinogradFp16(const float16_t *matrix_a, const float16_t *matrix_b, float16_t *matrix_c, int m, int k, + int n, int in_channel); +#ifdef __cplusplus +} +#endif + +#endif  // MINDSPORE_NNACL_FP16_MATRIX_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/pack_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pack_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/pack_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pack_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pack_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pack_fp16.h new file mode 100644 index 0000000000..5f8f1632c0 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pack_fp16.h @@ -0,0 +1,79 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_PACK_FP16_H_ +#define MINDSPORE_NNACL_FP16_PACK_FP16_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/conv_parameter.h" +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Im2ColPackUnitFp16(float16_t *input_data, ConvParameter *conv_param, float16_t *packed_input, int real_cal_num, + int block_index); + +void PackWeightToC8Fp16(const float16_t *origin_weight_data, float16_t *packed_weight_data, ConvParameter *conv_param); + +void PackHWCToWHCFp16(const float16_t *src, float16_t *dst, int height, int width, int channel); + +void PackWeightToC4Fp16(const float16_t *origin_weight_data, float16_t *packed_weight_data, ConvParameter *conv_param); + +void PackNHWCToNC4HW4Fp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNCHWToNC4HW4Fp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNCHWToNHWCFp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNHWCToNCHWFp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNHWCToNHWC4Fp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNHWCToNHWC8Fp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNHWC4ToNHWCFp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNCHWToNHWC4Fp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNC4HW4ToNHWC4Fp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNC4HW4ToNHWCFp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNC4HW4ToNCHWFp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNC8HW8ToNHWCFp16(const void *src, void *dst, int batch, int plane, int channel); + +void PackNCHWFp32ToNC8HW8Fp16(float *src, float16_t *dst, int batch, int plane, int channel); + +void PackNCHWFp16ToNC8HW8Fp16(float16_t *src, float16_t *dst, int 
batch, int plane, int channel); + +void PackNHWCFp32ToNHWC8Fp16(float *src, float16_t *dst, int batch, int plane, int channel); + +void PackNHWCFp32ToC8HWN8Fp16(float *src, float16_t *dst, int batch, int plane, int channel); + +void PackNHWCFp16ToC8HWN8Fp16(float16_t *src, float16_t *dst, int batch, int plane, int channel); + +void PackNHWC8Fp16ToNHWCFp32(float16_t *src, float *dst, int batch, int plane, int channel); + +void PackNHWC8ToNHWCFp16(float16_t *src, float16_t *dst, int batch, int plane, int channel); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_PACK_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/pad_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pad_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/pad_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pad_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pad_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pad_fp16.h new file mode 100644 index 0000000000..514e9f3fbc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pad_fp16.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP16_PAD_FP16_H_ +#define MINDSPORE_NNACL_FP16_PAD_FP16_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/fp32/pad_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif +void PadFp16(const float16_t *input_data, float16_t *output_data, const int *input_shape, const int *output_shape, + const int *paddings, const int tid, const int thread_num); +void MirrorPadFp16(const float16_t *input_data, float16_t *output_data, const int *input_shape, + const PadParameter *pad_param, int begin, int end); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_PAD_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/pooling_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pooling_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/pooling_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pooling_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pooling_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pooling_fp16.h new file mode 100644 index 0000000000..f3dec61de7 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/pooling_fp16.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_POOLING_FP16_H_ +#define MINDSPORE_NNACL_FP16_POOLING_FP16_H_ + +#include +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/pooling_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif +int AvgPoolingFp16(const float16_t *input_ptr, float16_t *output_ptr, PoolingParameter *pooling_param, int task_id, + float16_t min, float16_t max); + +void MaxPoolingFp16(const float16_t *input_ptr, float16_t *output_ptr, PoolingParameter *pooling_param, int task_id, + float16_t min, float16_t max); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FP16_POOLING_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/power_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/power_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/power_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/power_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/power_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/power_fp16.h new file mode 100644 index 0000000000..8a5ad96fb0 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/power_fp16.h @@ -0,0 +1,63 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_POWER_FP16_H_ +#define MINDSPORE_NNACL_FP16_POWER_FP16_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/power_parameter.h" + +#if defined(ENABLE_NEON) +typedef float16x8_t (*PowerSimdFunFp16)(float16x8_t x, const void *exponent); +#endif +typedef float16_t (*PowerScalarFunFp16)(float16_t x, const void *exponent); +typedef void (*PowerFunFp16)(const float16_t *, const float16_t *, float16_t *, int, float, float); + +#ifdef __cplusplus +extern "C" { +#endif +static inline bool CheckInteger(float16_t f) { return floorf(f) == f; } + +static inline float16_t StdPowerScalarFp16(float16_t x, const void *exponent) { + return powf(x, *(float16_t *)exponent); +} + +#if defined(ENABLE_NEON) +static inline float16x8_t StdPowerSimdFp16(float16x8_t x, const void *exponent) { + float16x8_t result; + result[0] = powf(x[0], *(float16_t *)exponent); + result[1] = powf(x[1], *(float16_t *)exponent); + result[2] = powf(x[2], *(float16_t *)exponent); + result[3] = powf(x[3], *(float16_t *)exponent); + result[4] = powf(x[4], *(float16_t *)exponent); + result[5] = powf(x[5], *(float16_t *)exponent); + result[6] = powf(x[6], *(float16_t *)exponent); + result[7] = powf(x[7], *(float16_t *)exponent); + return result; +} +#endif +int PowerFp16(const float16_t *input, const float16_t *exponent, float16_t *output, int len, float scale, float shift, + bool broadcast); +void PowerSingleFp16(const float16_t *input, const float16_t *exponent, float16_t *output, int len, float scale, + float shift); +void PowerBroadCastFp16(const float16_t *input, const float16_t *exponent, float16_t *output, int len, float scale, + float shift); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_POWER_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/quant_dtype_cast_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.c rename to 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/quant_dtype_cast_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/quant_dtype_cast_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/quant_dtype_cast_fp16.h new file mode 100644 index 0000000000..a92025d5c1 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/quant_dtype_cast_fp16.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_QUANTDTYPECAST_FP16_H_ +#define MINDSPORE_NNACL_FP16_QUANTDTYPECAST_FP16_H_ + +#include "nnacl/op_base.h" + +#ifdef ENABLE_NEON +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif +int DoDequantizeInt8ToFp16(int8_t *quant_values, float16_t *real_values, float scale, int32_t zp, int size); +int DoQuantizeFp16ToInt8(float16_t *real_values, int8_t *quant_values, float scale, int32_t zp, int size); + +int DoDequantizeUInt8ToFp16(uint8_t *quant_values, float16_t *real_values, float scale, int32_t zp, int size); +int DoQuantizeFp16ToUInt8(float16_t *real_values, uint8_t *quant_values, float scale, int32_t zp, int size); +#ifdef __cplusplus +} +#endif + +#endif  // MINDSPORE_NNACL_FP16_QUANTDTYPECAST_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/reduce_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/reduce_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/reduce_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/reduce_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/reduce_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/reduce_fp16.h new file mode 100644 index 0000000000..442b1c1b64 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/reduce_fp16.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_REDUCE_FP16_H_ +#define MINDSPORE_NNACL_FP16_REDUCE_FP16_H_ +#include "nnacl/op_base.h" +#include "nnacl/reduce_parameter.h" + +#ifdef ENABLE_NEON +#include +#endif +#ifdef __cplusplus +extern "C" { +#endif +int ReduceMeanFp16(const int outer_size, const int inner_size, const int axis_size, const float16_t *src_data, + float16_t *dst_data, const int tid, const int thread_num); +int ReduceMaxFp16(int outer_size, int inner_size, int axis_size, const float16_t *src_data, float16_t *dst_data, + int tid, int thread_num); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_REDUCE_FP16_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/scale_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/scale_fp16.c new file mode 100644 index 0000000000..aea928149d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/scale_fp16.c @@ -0,0 +1,223 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/fp16/scale_fp16.h" + +void Fp16ScaleInner(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, + int outer_end, int axis_size, int inner_size) { + for (int out = outer_start; out < outer_end; out++) { + int out_offset = out * axis_size * inner_size; + for (int i = 0; i < axis_size; i++) { + int axis_offset = out_offset + i * inner_size; + int in_index = 0; +#ifdef ENABLE_ARM64 + for (; in_index < inner_size - 8; in_index += 8) { + int in_offset = axis_offset + in_index; + float16x8_t data = vld1q_f16(in_data + in_offset); + float16x8_t scale_8 = vdupq_n_f16(scale[i]); + float16x8_t offset_8 = vdupq_n_f16(offset[i]); + float16x8_t result = vfmaq_f16(offset_8, data, scale_8); + + vst1q_f16(out_data + in_offset, result); + } +#endif + for (; in_index < inner_size; in_index++) { + int in_offset = axis_offset + in_index; + out_data[in_offset] = in_data[in_offset] * scale[i] + offset[i]; + } + } + } +} + +void Fp16ScaleAxis(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, + int outer_end, int axis_size) { + for (int out = outer_start; out < outer_end; out++) { + int out_offset = out * axis_size; + int index = 0; +#ifdef ENABLE_ARM64 + for (; index < axis_size - 8; index += 8) { + int in_offset = out_offset + index; + float16x8_t data = vld1q_f16(in_data + in_offset); + float16x8_t scale_8 = vld1q_f16(scale + index); + float16x8_t offset_8 = vld1q_f16(offset + index); + float16x8_t result = vfmaq_f16(offset_8, data, scale_8); + vst1q_f16(out_data + in_offset, result); + } +#endif + for (; index < axis_size; index++) { + int in_offset = out_offset + index; + out_data[in_offset] = in_data[in_offset] * scale[index] + offset[index]; + } + } +} + +void DoScaleFp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, + ScaleParameter *scale_param) { + int outer_step = UP_DIV(scale_param->outer_size_, 
scale_param->op_parameter_.thread_num_); + int outer_start = task_id * outer_step; + int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_); + + if (scale_param->inner_size_ == 1) { + Fp16ScaleAxis(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_); + } else { + Fp16ScaleInner(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_, + scale_param->inner_size_); + } +} + +void Fp16ScaleInnerRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, + int outer_end, int axis_size, int inner_size) { +#ifdef ENABLE_ARM64 + float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0}; +#endif + for (int out = outer_start; out < outer_end; out++) { + int out_offset = out * axis_size * inner_size; + for (int i = 0; i < axis_size; i++) { + int axis_offset = out_offset + i * inner_size; + int in_index = 0; +#ifdef ENABLE_ARM64 + for (; in_index < inner_size - 8; in_index += 8) { + int in_offset = axis_offset + in_index; + float16x8_t data = vld1q_f16(in_data + in_offset); + float16x8_t scale_8 = vdupq_n_f16(scale[i]); + float16x8_t offset_8 = vdupq_n_f16(offset[i]); + float16x8_t tmp = vfmaq_f16(offset_8, data, scale_8); + float16x8_t result = vmaxq_f16(tmp, zeros); + vst1q_f16(out_data + in_offset, result); + } +#endif + for (; in_index < inner_size; in_index++) { + int in_offset = axis_offset + in_index; + float tmp = in_data[in_offset] * scale[i] + offset[i]; + out_data[in_offset] = tmp > 0.0f ? 
tmp : 0.0f; + } + } + } +} + +void Fp16ScaleAxisRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, + int outer_end, int axis_size) { +#ifdef ENABLE_ARM64 + float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0}; +#endif + for (int out = outer_start; out < outer_end; out++) { + int out_offset = out * axis_size; + int index = 0; +#ifdef ENABLE_ARM64 + for (; index < axis_size - 8; index += 8) { + int in_offset = out_offset + index; + float16x8_t data = vld1q_f16(in_data + in_offset); + float16x8_t scale_8 = vld1q_f16(scale + index); + float16x8_t offset_8 = vld1q_f16(offset + index); + float16x8_t tmp = vfmaq_f16(offset_8, data, scale_8); + float16x8_t result = vmaxq_f16(tmp, zeros); + vst1q_f16(out_data + in_offset, result); + } +#endif + for (; index < axis_size; index++) { + int in_offset = out_offset + index; + float tmp = in_data[in_offset] * scale[index] + offset[index]; + out_data[in_offset] = tmp > 0.0f ? tmp : 0.0f; + } + } +} + +void Fp16DoScaleRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, + ScaleParameter *scale_param) { + int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_); + int outer_start = task_id * outer_step; + int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_); + + if (scale_param->inner_size_ == 1) { + Fp16ScaleAxisRelu(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_); + } else { + Fp16ScaleInnerRelu(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_, + scale_param->inner_size_); + } +} + +void Fp16ScaleInnerRelu6(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, + int outer_end, int axis_size, int inner_size) { +#ifdef ENABLE_ARM64 + float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0}; + float16x8_t bounds = {6, 6, 6, 6, 6, 6, 6, 6}; +#endif + for (int out = outer_start; out < outer_end; out++) { + 
int out_offset = out * axis_size * inner_size; + for (int i = 0; i < axis_size; i++) { + int axis_offset = out_offset + i * inner_size; + int in_index = 0; +#ifdef ENABLE_ARM64 + for (; in_index < inner_size - 8; in_index += 8) { + int in_offset = axis_offset + in_index; + float16x8_t data = vld1q_f16(in_data + in_offset); + float16x8_t scale_8 = vdupq_n_f16(scale[i]); + float16x8_t offset_8 = vdupq_n_f16(offset[i]); + float16x8_t tmp = vfmaq_f16(offset_8, data, scale_8); + float16x8_t result = vminq_f16(vmaxq_f16(tmp, zeros), bounds); + vst1q_f16(out_data + in_offset, result); + } +#endif + for (; in_index < inner_size; in_index++) { + int in_offset = axis_offset + in_index; + float tmp = in_data[in_offset] * scale[i] + offset[i]; + out_data[in_offset] = MSMIN(MSMAX(tmp, 0.0f), 6.0f); + } + } + } +} + +void Fp16ScaleAxisRelu6(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, + int outer_end, int axis_size) { +#ifdef ENABLE_ARM64 + float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0}; + float16x8_t bounds = {6, 6, 6, 6, 6, 6, 6, 6}; +#endif + for (int out = outer_start; out < outer_end; out++) { + int out_offset = out * axis_size; + int index = 0; +#ifdef ENABLE_ARM64 + for (; index < axis_size - 8; index += 8) { + int in_offset = out_offset + index; + float16x8_t data = vld1q_f16(in_data + in_offset); + float16x8_t scale_8 = vld1q_f16(scale + index); + float16x8_t offset_8 = vld1q_f16(offset + index); + float16x8_t tmp = vfmaq_f16(offset_8, data, scale_8); + float16x8_t result = vminq_f16(vmaxq_f16(tmp, zeros), bounds); + vst1q_f16(out_data + in_offset, result); + } +#endif + for (; index < axis_size; index++) { + int in_offset = out_offset + index; + float tmp = in_data[in_offset] * scale[index] + offset[index]; + out_data[in_offset] = MSMIN(MSMAX(tmp, 0.0f), 6.0f); + } + } +} + +void DoScaleRelu6Fp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, + ScaleParameter *scale_param) { + 
int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_); + int outer_start = task_id * outer_step; + int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_); + + if (scale_param->inner_size_ == 1) { + Fp16ScaleAxisRelu6(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_); + } else { + Fp16ScaleInnerRelu6(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_, + scale_param->inner_size_); + } +} diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/scale_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/scale_fp16.h new file mode 100644 index 0000000000..81da793d6b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/scale_fp16.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SCALE_FP16_H_ +#define MINDSPORE_NNACL_SCALE_FP16_H_ + +#include "nnacl/op_base.h" +#include "nnacl/scale.h" +#ifdef ENABLE_NEON +#include +#endif +#ifdef __cplusplus +extern "C" { +#endif +void DoScaleFp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, + ScaleParameter *scale_param); +void Fp16DoScaleRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, + ScaleParameter *scale_param); +void DoScaleRelu6Fp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, + ScaleParameter *scale_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_SCALE_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/softmax_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/softmax_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/softmax_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/softmax_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/softmax_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/softmax_fp16.h new file mode 100644 index 0000000000..cdd6ead438 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/softmax_fp16.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_SOFTMAX_FP16_H_ +#define MINDSPORE_NNACL_FP16_SOFTMAX_FP16_H_ + +#include "nnacl/op_base.h" +#include "nnacl/softmax_parameter.h" +#ifdef ENABLE_NEON +#include +#endif +#ifdef __cplusplus +extern "C" { +#endif +void SoftmaxNormFp16(const float16_t *src, float16_t *dst, int batch, int channel); +void SoftmaxFp16(const float16_t *input_ptr, float16_t *output_ptr, float16_t *sum_data, SoftmaxParameter *parameter); +void SoftmaxLastAxisFp16(const float16_t *src, float16_t *dst, int batch, int channel); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_SOFTMAX_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/transpose_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/transpose_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/transpose_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/transpose_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/transpose_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/transpose_fp16.h new file mode 100644 index 0000000000..9c300df2de --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/transpose_fp16.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_TRANSPOSE_FP16_H_ +#define MINDSPORE_NNACL_FP16_TRANSPOSE_FP16_H_ + +#include "nnacl/op_base.h" +#include "nnacl/transpose.h" +#ifdef ENABLE_NEON +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif +int Fp16DoTranspose(const float16_t *in_data, float16_t *out_data, const int *output_shape, + TransposeParameter *transpose_param, int *size, int *position); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_TRANSPOSE_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/winograd_transform_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_transform_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/winograd_transform_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_transform_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_transform_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_transform_fp16.h new file mode 100644 index 0000000000..863248950e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_transform_fp16.h @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_WINOGRAD_TRANSFORM_FP16_H_ +#define MINDSPORE_NNACL_FP16_WINOGRAD_TRANSFORM_FP16_H_ + +#include +#include +#include "nnacl/errorcode.h" +#include "nnacl/fp16/cast_fp16.h" +#include "nnacl/fp16/conv_fp16.h" +#include "nnacl/fp16/matrix_fp16.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// for fp16 convolution 3x3 filter/input/output transform +void Conv3x3Fp16InputUnit(float16_t *tmp_data, float16_t *trans_input_data, size_t step); + +void Conv3x3Fp16InputTransform(const float16_t *input_data, float16_t *trans_input, float16_t *tmp_data, + int start_index, int real_cal_num, int out_w_block, ConvParameter *conv_param); + +void Conv3x3Fp16FilterTransform(const float16_t *weight_data, float16_t *trans_weight, int iC8, int output_channel, + int kernel_plane); + +void Conv3x3Fp16OutputUnit(const float16_t *gemm_out, const float16_t *bias_data, float16_t *output_data, int output_w); + +void Conv3x3Fp16OutputTransform(const float16_t *gemm_out, float16_t *out_data, const float16_t *bias_data, + int start_index, int real_cal_num, int out_w_block, ConvParameter *conv_param); + +// fp16 common winograd +void WinogradInputTransformFp16(const float16_t *input_data, float16_t *trans_input, float16_t *tmp_data, int cal_num, + int out_tile_index, int out_w_block_num, ConvParameter *conv_param, + InputTransFp16Func func); + +void WinogradOutputTransformFp16(const float16_t *gemm_out, float16_t *tmp_out_data, const float16_t *bias_data, + int cal_num, int out_tile_index, int output_unit_num, ConvParameter *conv_param, + OutputTransFp16Func func); + +// fp16 winograd weight trans +int WinogradWeightTransformFp16(const float16_t *weight_data, float16_t *winograd_data, float *matrix_g, + float *matrix_gt, int oc_block, int input_unit, int kernel_unit, int filter_channel, + int filter_batch, bool pack); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_WINOGRAD_TRANSFORM_FP16_H_ diff --git 
a/mindspore/lite/nnacl/fp16/winograd_utils_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_utils_fp16.c similarity index 100% rename from mindspore/lite/nnacl/fp16/winograd_utils_fp16.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_utils_fp16.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_utils_fp16.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_utils_fp16.h new file mode 100644 index 0000000000..72a4b709a0 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/winograd_utils_fp16.h @@ -0,0 +1,502 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP16_WINOGRAD_UTILS_H_ +#define MINDSPORE_NNACL_FP16_WINOGRAD_UTILS_H_ + +#include <arm_neon.h> +#include "nnacl/conv_parameter.h" +#include "nnacl/op_base.h" + +#define MAX_LEN 256 + +#ifdef __cplusplus +extern "C" { +#endif +typedef void (*InputTransFp16Func)(const float16_t *src_data, float16_t *dst_data, int src_step, int dst_step, + int real_c); + +typedef void (*OutputTransFp16Func)(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); + +void GeneralInputTransformUnitFp16(const float16_t *src_data, float16_t *dst_data, float16_t *matrix_b, + float16_t *matrix_bt, int src_step, int dst_step, int in_unit); + +void GeneralOutputTransformUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + float16_t *matrix_a, float16_t *matrix_at, int src_step, int dst_step, int in_unit, + int out_unit); + +#define Load16DataFp16 \ + src[0] = vld1q_f16(src_data + 0 * src_step); \ + src[1] = vld1q_f16(src_data + 1 * src_step); \ + src[2] = vld1q_f16(src_data + 2 * src_step); \ + src[3] = vld1q_f16(src_data + 3 * src_step); \ + src[4] = vld1q_f16(src_data + 4 * src_step); \ + src[5] = vld1q_f16(src_data + 5 * src_step); \ + src[6] = vld1q_f16(src_data + 6 * src_step); \ + src[7] = vld1q_f16(src_data + 7 * src_step); \ + src[8] = vld1q_f16(src_data + 8 * src_step); \ + src[9] = vld1q_f16(src_data + 9 * src_step); \ + src[10] = vld1q_f16(src_data + 10 * src_step); \ + src[11] = vld1q_f16(src_data + 11 * src_step); \ + src[12] = vld1q_f16(src_data + 12 * src_step); \ + src[13] = vld1q_f16(src_data + 13 * src_step); \ + src[14] = vld1q_f16(src_data + 14 * src_step); \ + src[15] = vld1q_f16(src_data + 15 * src_step); + +#define Load16DataC4Fp16 \ + src[0] = vld1_f16(src_data + 0 * src_step); \ + src[1] = vld1_f16(src_data + 1 * src_step); \ + src[2] = vld1_f16(src_data + 2 * src_step); \ + src[3] = vld1_f16(src_data + 3 * src_step); \ + src[4]
= vld1_f16(src_data + 4 * src_step); \ + src[5] = vld1_f16(src_data + 5 * src_step); \ + src[6] = vld1_f16(src_data + 6 * src_step); \ + src[7] = vld1_f16(src_data + 7 * src_step); \ + src[8] = vld1_f16(src_data + 8 * src_step); \ + src[9] = vld1_f16(src_data + 9 * src_step); \ + src[10] = vld1_f16(src_data + 10 * src_step); \ + src[11] = vld1_f16(src_data + 11 * src_step); \ + src[12] = vld1_f16(src_data + 12 * src_step); \ + src[13] = vld1_f16(src_data + 13 * src_step); \ + src[14] = vld1_f16(src_data + 14 * src_step); \ + src[15] = vld1_f16(src_data + 15 * src_step); + +#define Load36DataFp16 \ + src[0] = vld1q_f16(src_data + 0 * src_step); \ + src[1] = vld1q_f16(src_data + 1 * src_step); \ + src[2] = vld1q_f16(src_data + 2 * src_step); \ + src[3] = vld1q_f16(src_data + 3 * src_step); \ + src[4] = vld1q_f16(src_data + 4 * src_step); \ + src[5] = vld1q_f16(src_data + 5 * src_step); \ + src[6] = vld1q_f16(src_data + 6 * src_step); \ + src[7] = vld1q_f16(src_data + 7 * src_step); \ + src[8] = vld1q_f16(src_data + 8 * src_step); \ + src[9] = vld1q_f16(src_data + 9 * src_step); \ + src[10] = vld1q_f16(src_data + 10 * src_step); \ + src[11] = vld1q_f16(src_data + 11 * src_step); \ + src[12] = vld1q_f16(src_data + 12 * src_step); \ + src[13] = vld1q_f16(src_data + 13 * src_step); \ + src[14] = vld1q_f16(src_data + 14 * src_step); \ + src[15] = vld1q_f16(src_data + 15 * src_step); \ + src[16] = vld1q_f16(src_data + 16 * src_step); \ + src[17] = vld1q_f16(src_data + 17 * src_step); \ + src[18] = vld1q_f16(src_data + 18 * src_step); \ + src[19] = vld1q_f16(src_data + 19 * src_step); \ + src[20] = vld1q_f16(src_data + 20 * src_step); \ + src[21] = vld1q_f16(src_data + 21 * src_step); \ + src[22] = vld1q_f16(src_data + 22 * src_step); \ + src[23] = vld1q_f16(src_data + 23 * src_step); \ + src[24] = vld1q_f16(src_data + 24 * src_step); \ + src[25] = vld1q_f16(src_data + 25 * src_step); \ + src[26] = vld1q_f16(src_data + 26 * src_step); \ + src[27] = vld1q_f16(src_data + 27 * 
src_step); \ + src[28] = vld1q_f16(src_data + 28 * src_step); \ + src[29] = vld1q_f16(src_data + 29 * src_step); \ + src[30] = vld1q_f16(src_data + 30 * src_step); \ + src[31] = vld1q_f16(src_data + 31 * src_step); \ + src[32] = vld1q_f16(src_data + 32 * src_step); \ + src[33] = vld1q_f16(src_data + 33 * src_step); \ + src[34] = vld1q_f16(src_data + 34 * src_step); \ + src[35] = vld1q_f16(src_data + 35 * src_step); + +#define Load36DataC4Fp16 \ + src[0] = vld1_f16(src_data + 0 * src_step); \ + src[1] = vld1_f16(src_data + 1 * src_step); \ + src[2] = vld1_f16(src_data + 2 * src_step); \ + src[3] = vld1_f16(src_data + 3 * src_step); \ + src[4] = vld1_f16(src_data + 4 * src_step); \ + src[5] = vld1_f16(src_data + 5 * src_step); \ + src[6] = vld1_f16(src_data + 6 * src_step); \ + src[7] = vld1_f16(src_data + 7 * src_step); \ + src[8] = vld1_f16(src_data + 8 * src_step); \ + src[9] = vld1_f16(src_data + 9 * src_step); \ + src[10] = vld1_f16(src_data + 10 * src_step); \ + src[11] = vld1_f16(src_data + 11 * src_step); \ + src[12] = vld1_f16(src_data + 12 * src_step); \ + src[13] = vld1_f16(src_data + 13 * src_step); \ + src[14] = vld1_f16(src_data + 14 * src_step); \ + src[15] = vld1_f16(src_data + 15 * src_step); \ + src[16] = vld1_f16(src_data + 16 * src_step); \ + src[17] = vld1_f16(src_data + 17 * src_step); \ + src[18] = vld1_f16(src_data + 18 * src_step); \ + src[19] = vld1_f16(src_data + 19 * src_step); \ + src[20] = vld1_f16(src_data + 20 * src_step); \ + src[21] = vld1_f16(src_data + 21 * src_step); \ + src[22] = vld1_f16(src_data + 22 * src_step); \ + src[23] = vld1_f16(src_data + 23 * src_step); \ + src[24] = vld1_f16(src_data + 24 * src_step); \ + src[25] = vld1_f16(src_data + 25 * src_step); \ + src[26] = vld1_f16(src_data + 26 * src_step); \ + src[27] = vld1_f16(src_data + 27 * src_step); \ + src[28] = vld1_f16(src_data + 28 * src_step); \ + src[29] = vld1_f16(src_data + 29 * src_step); \ + src[30] = vld1_f16(src_data + 30 * src_step); \ + src[31] = 
vld1_f16(src_data + 31 * src_step); \ + src[32] = vld1_f16(src_data + 32 * src_step); \ + src[33] = vld1_f16(src_data + 33 * src_step); \ + src[34] = vld1_f16(src_data + 34 * src_step); \ + src[35] = vld1_f16(src_data + 35 * src_step); + +#define Load64DataFp16 \ + src[0] = vld1q_f16(src_data + 0 * src_step); \ + src[1] = vld1q_f16(src_data + 1 * src_step); \ + src[2] = vld1q_f16(src_data + 2 * src_step); \ + src[3] = vld1q_f16(src_data + 3 * src_step); \ + src[4] = vld1q_f16(src_data + 4 * src_step); \ + src[5] = vld1q_f16(src_data + 5 * src_step); \ + src[6] = vld1q_f16(src_data + 6 * src_step); \ + src[7] = vld1q_f16(src_data + 7 * src_step); \ + src[8] = vld1q_f16(src_data + 8 * src_step); \ + src[9] = vld1q_f16(src_data + 9 * src_step); \ + src[10] = vld1q_f16(src_data + 10 * src_step); \ + src[11] = vld1q_f16(src_data + 11 * src_step); \ + src[12] = vld1q_f16(src_data + 12 * src_step); \ + src[13] = vld1q_f16(src_data + 13 * src_step); \ + src[14] = vld1q_f16(src_data + 14 * src_step); \ + src[15] = vld1q_f16(src_data + 15 * src_step); \ + src[16] = vld1q_f16(src_data + 16 * src_step); \ + src[17] = vld1q_f16(src_data + 17 * src_step); \ + src[18] = vld1q_f16(src_data + 18 * src_step); \ + src[19] = vld1q_f16(src_data + 19 * src_step); \ + src[20] = vld1q_f16(src_data + 20 * src_step); \ + src[21] = vld1q_f16(src_data + 21 * src_step); \ + src[22] = vld1q_f16(src_data + 22 * src_step); \ + src[23] = vld1q_f16(src_data + 23 * src_step); \ + src[24] = vld1q_f16(src_data + 24 * src_step); \ + src[25] = vld1q_f16(src_data + 25 * src_step); \ + src[26] = vld1q_f16(src_data + 26 * src_step); \ + src[27] = vld1q_f16(src_data + 27 * src_step); \ + src[28] = vld1q_f16(src_data + 28 * src_step); \ + src[29] = vld1q_f16(src_data + 29 * src_step); \ + src[30] = vld1q_f16(src_data + 30 * src_step); \ + src[31] = vld1q_f16(src_data + 31 * src_step); \ + src[32] = vld1q_f16(src_data + 32 * src_step); \ + src[33] = vld1q_f16(src_data + 33 * src_step); \ + src[34] = 
vld1q_f16(src_data + 34 * src_step); \ + src[35] = vld1q_f16(src_data + 35 * src_step); \ + src[36] = vld1q_f16(src_data + 36 * src_step); \ + src[37] = vld1q_f16(src_data + 37 * src_step); \ + src[38] = vld1q_f16(src_data + 38 * src_step); \ + src[39] = vld1q_f16(src_data + 39 * src_step); \ + src[40] = vld1q_f16(src_data + 40 * src_step); \ + src[41] = vld1q_f16(src_data + 41 * src_step); \ + src[42] = vld1q_f16(src_data + 42 * src_step); \ + src[43] = vld1q_f16(src_data + 43 * src_step); \ + src[44] = vld1q_f16(src_data + 44 * src_step); \ + src[45] = vld1q_f16(src_data + 45 * src_step); \ + src[46] = vld1q_f16(src_data + 46 * src_step); \ + src[47] = vld1q_f16(src_data + 47 * src_step); \ + src[48] = vld1q_f16(src_data + 48 * src_step); \ + src[49] = vld1q_f16(src_data + 49 * src_step); \ + src[50] = vld1q_f16(src_data + 50 * src_step); \ + src[51] = vld1q_f16(src_data + 51 * src_step); \ + src[52] = vld1q_f16(src_data + 52 * src_step); \ + src[53] = vld1q_f16(src_data + 53 * src_step); \ + src[54] = vld1q_f16(src_data + 54 * src_step); \ + src[55] = vld1q_f16(src_data + 55 * src_step); \ + src[56] = vld1q_f16(src_data + 56 * src_step); \ + src[57] = vld1q_f16(src_data + 57 * src_step); \ + src[58] = vld1q_f16(src_data + 58 * src_step); \ + src[59] = vld1q_f16(src_data + 59 * src_step); \ + src[60] = vld1q_f16(src_data + 60 * src_step); \ + src[61] = vld1q_f16(src_data + 61 * src_step); \ + src[62] = vld1q_f16(src_data + 62 * src_step); \ + src[63] = vld1q_f16(src_data + 63 * src_step); + +#define Load64DataC4Fp16 \ + src[0] = vld1_f16(src_data + 0 * src_step); \ + src[1] = vld1_f16(src_data + 1 * src_step); \ + src[2] = vld1_f16(src_data + 2 * src_step); \ + src[3] = vld1_f16(src_data + 3 * src_step); \ + src[4] = vld1_f16(src_data + 4 * src_step); \ + src[5] = vld1_f16(src_data + 5 * src_step); \ + src[6] = vld1_f16(src_data + 6 * src_step); \ + src[7] = vld1_f16(src_data + 7 * src_step); \ + src[8] = vld1_f16(src_data + 8 * src_step); \ + src[9] = 
vld1_f16(src_data + 9 * src_step); \ + src[10] = vld1_f16(src_data + 10 * src_step); \ + src[11] = vld1_f16(src_data + 11 * src_step); \ + src[12] = vld1_f16(src_data + 12 * src_step); \ + src[13] = vld1_f16(src_data + 13 * src_step); \ + src[14] = vld1_f16(src_data + 14 * src_step); \ + src[15] = vld1_f16(src_data + 15 * src_step); \ + src[16] = vld1_f16(src_data + 16 * src_step); \ + src[17] = vld1_f16(src_data + 17 * src_step); \ + src[18] = vld1_f16(src_data + 18 * src_step); \ + src[19] = vld1_f16(src_data + 19 * src_step); \ + src[20] = vld1_f16(src_data + 20 * src_step); \ + src[21] = vld1_f16(src_data + 21 * src_step); \ + src[22] = vld1_f16(src_data + 22 * src_step); \ + src[23] = vld1_f16(src_data + 23 * src_step); \ + src[24] = vld1_f16(src_data + 24 * src_step); \ + src[25] = vld1_f16(src_data + 25 * src_step); \ + src[26] = vld1_f16(src_data + 26 * src_step); \ + src[27] = vld1_f16(src_data + 27 * src_step); \ + src[28] = vld1_f16(src_data + 28 * src_step); \ + src[29] = vld1_f16(src_data + 29 * src_step); \ + src[30] = vld1_f16(src_data + 30 * src_step); \ + src[31] = vld1_f16(src_data + 31 * src_step); \ + src[32] = vld1_f16(src_data + 32 * src_step); \ + src[33] = vld1_f16(src_data + 33 * src_step); \ + src[34] = vld1_f16(src_data + 34 * src_step); \ + src[35] = vld1_f16(src_data + 35 * src_step); \ + src[36] = vld1_f16(src_data + 36 * src_step); \ + src[37] = vld1_f16(src_data + 37 * src_step); \ + src[38] = vld1_f16(src_data + 38 * src_step); \ + src[39] = vld1_f16(src_data + 39 * src_step); \ + src[40] = vld1_f16(src_data + 40 * src_step); \ + src[41] = vld1_f16(src_data + 41 * src_step); \ + src[42] = vld1_f16(src_data + 42 * src_step); \ + src[43] = vld1_f16(src_data + 43 * src_step); \ + src[44] = vld1_f16(src_data + 44 * src_step); \ + src[45] = vld1_f16(src_data + 45 * src_step); \ + src[46] = vld1_f16(src_data + 46 * src_step); \ + src[47] = vld1_f16(src_data + 47 * src_step); \ + src[48] = vld1_f16(src_data + 48 * src_step); \ + src[49] = 
vld1_f16(src_data + 49 * src_step); \ + src[50] = vld1_f16(src_data + 50 * src_step); \ + src[51] = vld1_f16(src_data + 51 * src_step); \ + src[52] = vld1_f16(src_data + 52 * src_step); \ + src[53] = vld1_f16(src_data + 53 * src_step); \ + src[54] = vld1_f16(src_data + 54 * src_step); \ + src[55] = vld1_f16(src_data + 55 * src_step); \ + src[56] = vld1_f16(src_data + 56 * src_step); \ + src[57] = vld1_f16(src_data + 57 * src_step); \ + src[58] = vld1_f16(src_data + 58 * src_step); \ + src[59] = vld1_f16(src_data + 59 * src_step); \ + src[60] = vld1_f16(src_data + 60 * src_step); \ + src[61] = vld1_f16(src_data + 61 * src_step); \ + src[62] = vld1_f16(src_data + 62 * src_step); \ + src[63] = vld1_f16(src_data + 63 * src_step); + +InputTransFp16Func GetInputTransFp16Func(int input_unit); + +void InputTransform4x4UnitFp16(const float16_t *src_data, float16_t *dst_data, int src_step, int dst_step, int real_c); + +void InputTransform6x6UnitFp16(const float16_t *src_data, float16_t *dst_data, int src_step, int dst_step, int real_c); + +void InputTransform8x8UnitFp16(const float16_t *src_data, float16_t *dst_data, int src_step, int dst_step, int real_c); + +OutputTransFp16Func GetOutputTransFp16Func(int input_unit, int output_unit, ActType act_type); + +#define Store4DataFp16 \ + vst1q_f16(dst_data, m[0]); \ + vst1q_f16(dst_data + out_c, m[1]); \ + vst1q_f16(dst_data + dst_step * out_c, m[2]); \ + vst1q_f16(dst_data + dst_step * out_c + out_c, m[3]); + +#define Store4DataC4Fp16 \ + vst1_f16(dst_data, m[0]); \ + vst1_f16(dst_data + out_c, m[1]); \ + vst1_f16(dst_data + dst_step * out_c, m[2]); \ + vst1_f16(dst_data + dst_step * out_c + out_c, m[3]); + +#define Store9DataFp16 \ + vst1q_f16(dst_data, m[0]); \ + vst1q_f16(dst_data + out_c, m[1]); \ + vst1q_f16(dst_data + 2 * out_c, m[2]); \ + vst1q_f16(dst_data + dst_step * out_c, m[3]); \ + vst1q_f16(dst_data + dst_step * out_c + out_c, m[4]); \ + vst1q_f16(dst_data + dst_step * out_c + 2 * out_c, m[5]); \ + 
vst1q_f16(dst_data + 2 * dst_step * out_c, m[6]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c + out_c, m[7]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[8]); + +#define Store9DataC4Fp16 \ + vst1_f16(dst_data, m[0]); \ + vst1_f16(dst_data + out_c, m[1]); \ + vst1_f16(dst_data + 2 * out_c, m[2]); \ + vst1_f16(dst_data + dst_step * out_c, m[3]); \ + vst1_f16(dst_data + dst_step * out_c + out_c, m[4]); \ + vst1_f16(dst_data + dst_step * out_c + 2 * out_c, m[5]); \ + vst1_f16(dst_data + 2 * dst_step * out_c, m[6]); \ + vst1_f16(dst_data + 2 * dst_step * out_c + out_c, m[7]); \ + vst1_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[8]); + +#define Store16DataFp16 \ + vst1q_f16(dst_data, m[0]); \ + vst1q_f16(dst_data + out_c, m[1]); \ + vst1q_f16(dst_data + 2 * out_c, m[2]); \ + vst1q_f16(dst_data + 3 * out_c, m[3]); \ + vst1q_f16(dst_data + dst_step * out_c, m[4]); \ + vst1q_f16(dst_data + dst_step * out_c + out_c, m[5]); \ + vst1q_f16(dst_data + dst_step * out_c + 2 * out_c, m[6]); \ + vst1q_f16(dst_data + dst_step * out_c + 3 * out_c, m[7]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c, m[8]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c + out_c, m[9]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[10]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c + 3 * out_c, m[11]); \ + vst1q_f16(dst_data + 3 * dst_step * out_c, m[12]); \ + vst1q_f16(dst_data + 3 * dst_step * out_c + out_c, m[13]); \ + vst1q_f16(dst_data + 3 * dst_step * out_c + 2 * out_c, m[14]); \ + vst1q_f16(dst_data + 3 * dst_step * out_c + 3 * out_c, m[15]); + +#define Store16DataC4Fp16 \ + vst1_f16(dst_data, m[0]); \ + vst1_f16(dst_data + out_c, m[1]); \ + vst1_f16(dst_data + 2 * out_c, m[2]); \ + vst1_f16(dst_data + 3 * out_c, m[3]); \ + vst1_f16(dst_data + dst_step * out_c, m[4]); \ + vst1_f16(dst_data + dst_step * out_c + out_c, m[5]); \ + vst1_f16(dst_data + dst_step * out_c + 2 * out_c, m[6]); \ + vst1_f16(dst_data + dst_step * out_c + 3 * out_c, m[7]); \ + 
vst1_f16(dst_data + 2 * dst_step * out_c, m[8]); \ + vst1_f16(dst_data + 2 * dst_step * out_c + out_c, m[9]); \ + vst1_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[10]); \ + vst1_f16(dst_data + 2 * dst_step * out_c + 3 * out_c, m[11]); \ + vst1_f16(dst_data + 3 * dst_step * out_c, m[12]); \ + vst1_f16(dst_data + 3 * dst_step * out_c + out_c, m[13]); \ + vst1_f16(dst_data + 3 * dst_step * out_c + 2 * out_c, m[14]); \ + vst1_f16(dst_data + 3 * dst_step * out_c + 3 * out_c, m[15]); + +#define Store25DataFp16 \ + vst1q_f16(dst_data, m[0]); \ + vst1q_f16(dst_data + out_c, m[1]); \ + vst1q_f16(dst_data + 2 * out_c, m[2]); \ + vst1q_f16(dst_data + 3 * out_c, m[3]); \ + vst1q_f16(dst_data + 4 * out_c, m[4]); \ + vst1q_f16(dst_data + dst_step * out_c, m[5]); \ + vst1q_f16(dst_data + dst_step * out_c + out_c, m[6]); \ + vst1q_f16(dst_data + dst_step * out_c + 2 * out_c, m[7]); \ + vst1q_f16(dst_data + dst_step * out_c + 3 * out_c, m[8]); \ + vst1q_f16(dst_data + dst_step * out_c + 4 * out_c, m[9]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c, m[10]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c + out_c, m[11]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[12]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c + 3 * out_c, m[13]); \ + vst1q_f16(dst_data + 2 * dst_step * out_c + 4 * out_c, m[14]); \ + vst1q_f16(dst_data + 3 * dst_step * out_c, m[15]); \ + vst1q_f16(dst_data + 3 * dst_step * out_c + out_c, m[16]); \ + vst1q_f16(dst_data + 3 * dst_step * out_c + 2 * out_c, m[17]); \ + vst1q_f16(dst_data + 3 * dst_step * out_c + 3 * out_c, m[18]); \ + vst1q_f16(dst_data + 3 * dst_step * out_c + 4 * out_c, m[19]); \ + vst1q_f16(dst_data + 4 * dst_step * out_c, m[20]); \ + vst1q_f16(dst_data + 4 * dst_step * out_c + out_c, m[21]); \ + vst1q_f16(dst_data + 4 * dst_step * out_c + 2 * out_c, m[22]); \ + vst1q_f16(dst_data + 4 * dst_step * out_c + 3 * out_c, m[23]); \ + vst1q_f16(dst_data + 4 * dst_step * out_c + 4 * out_c, m[24]); + +#define Store25DataC4Fp16 \ + 
vst1_f16(dst_data, m[0]); \ + vst1_f16(dst_data + out_c, m[1]); \ + vst1_f16(dst_data + 2 * out_c, m[2]); \ + vst1_f16(dst_data + 3 * out_c, m[3]); \ + vst1_f16(dst_data + 4 * out_c, m[4]); \ + vst1_f16(dst_data + dst_step * out_c, m[5]); \ + vst1_f16(dst_data + dst_step * out_c + out_c, m[6]); \ + vst1_f16(dst_data + dst_step * out_c + 2 * out_c, m[7]); \ + vst1_f16(dst_data + dst_step * out_c + 3 * out_c, m[8]); \ + vst1_f16(dst_data + dst_step * out_c + 4 * out_c, m[9]); \ + vst1_f16(dst_data + 2 * dst_step * out_c, m[10]); \ + vst1_f16(dst_data + 2 * dst_step * out_c + out_c, m[11]); \ + vst1_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[12]); \ + vst1_f16(dst_data + 2 * dst_step * out_c + 3 * out_c, m[13]); \ + vst1_f16(dst_data + 2 * dst_step * out_c + 4 * out_c, m[14]); \ + vst1_f16(dst_data + 3 * dst_step * out_c, m[15]); \ + vst1_f16(dst_data + 3 * dst_step * out_c + out_c, m[16]); \ + vst1_f16(dst_data + 3 * dst_step * out_c + 2 * out_c, m[17]); \ + vst1_f16(dst_data + 3 * dst_step * out_c + 3 * out_c, m[18]); \ + vst1_f16(dst_data + 3 * dst_step * out_c + 4 * out_c, m[19]); \ + vst1_f16(dst_data + 4 * dst_step * out_c, m[20]); \ + vst1_f16(dst_data + 4 * dst_step * out_c + out_c, m[21]); \ + vst1_f16(dst_data + 4 * dst_step * out_c + 2 * out_c, m[22]); \ + vst1_f16(dst_data + 4 * dst_step * out_c + 3 * out_c, m[23]); \ + vst1_f16(dst_data + 4 * dst_step * out_c + 4 * out_c, m[24]); + +void OutputTransform4x2UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x2ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x2Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void 
OutputTransform4x3UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x3ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x3Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); + +void OutputTransform6x2UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x2ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x2Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x3UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x3ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x3Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x4UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x4ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void 
OutputTransform6x4Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x5UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x5ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x5Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); + +void OutputTransform8x2UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x2ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x2Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x3UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x3ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x3Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x4UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void 
OutputTransform8x4ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x4Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x5UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x5ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x5Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x6ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x6Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x7UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x7ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x7Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, + int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); + +int 
SelectOutputUnitFp16(ConvParameter *conv_param); + +void CheckIfUseWinogradFp16(bool *use_winograd, int *output_unit, ConvParameter *conv_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_WINOGRAD_UTILS_H_ diff --git a/mindspore/lite/nnacl/fp16_grad/activation_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/activation_grad.c similarity index 100% rename from mindspore/lite/nnacl/fp16_grad/activation_grad.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/activation_grad.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/activation_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/activation_grad.h new file mode 100644 index 0000000000..36d3e8d333 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/activation_grad.h @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP16_GRAD_ACTIVATION_GRAD_H_ +#define MINDSPORE_NNACL_FP16_GRAD_ACTIVATION_GRAD_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include <math.h> +#include "nnacl/op_base.h" +#include "nnacl/int8/fixed_point.h" + +typedef struct ActivationGradParameterFp16 { + OpParameter op_parameter; + int type_; + float alpha_; +} ActivationGradParameterFp16; +#ifdef __cplusplus +extern "C" { +#endif + +int Fp16ReluGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); +int Fp16SigmoidGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_GRAD_ACTIVATION_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/arithmetic_self_grad.c similarity index 100% rename from mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/arithmetic_self_grad.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/arithmetic_self_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/arithmetic_self_grad.h new file mode 100644 index 0000000000..9a99ade802 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/arithmetic_self_grad.h @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +#ifndef MINDSPORE_NNACL_FP16_GRAD_ARITHMETHIC_SELF_GRAD_H_ +#define MINDSPORE_NNACL_FP16_GRAD_ARITHMETHIC_SELF_GRAD_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include <math.h> +#include "nnacl/op_base.h" + +typedef struct ArithmeticSelfGradParameterFp16 { + OpParameter op_parameter; + int type_; +} ArithmeticSelfGradParameterFp16; +#ifdef __cplusplus +extern "C" { +#endif + +int Fp16LogGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP16_GRAD_ARITHMETHIC_SELF_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32/activation_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/activation_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/activation_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/activation_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/activation_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/activation_fp32.h new file mode 100644 index 0000000000..3a3b4183d3 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/activation_fp32.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +#ifndef MINDSPORE_NNACL_FP32_ACTIVATION_H_ +#define MINDSPORE_NNACL_FP32_ACTIVATION_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/int8/fixed_point.h" + +typedef struct ActivationParameter { + OpParameter op_parameter_; + int type_; + float alpha_; + float min_val_; + float max_val_; +} ActivationParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int Fp32Relu(const float *src, int length, float *dst); +int Fp32Relu6(const float *src, int length, float *dst); +int LRelu(const float *src, int length, float *dst, float alpha); +int Sigmoid(const float *src, int length, float *dst); +int Tanh(const float *src, int length, float *dst); +int HSigmoid(const float *src, int length, float *dst); +int Swish(const float *src, int length, float *dst); +int HSwish(const float *src, int length, float *dst); +int HardTanh(const float *src, int length, float *dst, float min_val, float max_val); +int Gelu(const float *src, int length, float *dst, bool approximate); + +float TanhOpt(float src); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FP32_ACTIVATION_H_ diff --git a/mindspore/lite/nnacl/fp32/add_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/add_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/add_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/add_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/add_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/add_fp32.h new file mode 100644 index 0000000000..f5d44a30d1 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/add_fp32.h @@ -0,0 +1,45 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_ADD_H_ +#define MINDSPORE_NNACL_FP32_ADD_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ElementAdd(const float *in0, const float *in1, float *out, int size); +int ElementAddRelu(const float *in0, const float *in1, float *out, int size); +int ElementAddRelu6(const float *in0, const float *in1, float *out, int size); +int ElementAddInt(const int *in0, const int *in1, int *out, int size); +int ElementOptAdd(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptAddInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); +int ElementOptAddRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptAddRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int BroadcastAdd(const float *in0, const float *in1, float *tile_in0, float *tile_in1, float *out, int size, + ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_ADD_H_ diff --git a/mindspore/lite/nnacl/fp32/adder_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/adder_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/adder_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/adder_fp32.c diff --git 
a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/adder_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/adder_fp32.h new file mode 100644 index 0000000000..3908102a83 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/adder_fp32.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_ADDER_H_ +#define MINDSPORE_NNACL_FP32_ADDER_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef ENABLE_ARM64 +void AdderFloatNeon64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, size_t stride); +#endif + +void AdderOpt(const float *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row, int col, + size_t stride); + +void AdderFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, + float *col_major_input, float *output_data, int task_id, ConvParameter *conv_param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_ADDER_H_ diff --git a/mindspore/lite/nnacl/fp32/arg_min_max_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arg_min_max_fp32.c similarity index 100% rename from 
mindspore/lite/nnacl/fp32/arg_min_max_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arg_min_max_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arg_min_max_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arg_min_max_fp32.h new file mode 100644 index 0000000000..1cb61a8457 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arg_min_max_fp32.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_ARG_MIN_MAX_H_ +#define MINDSPORE_NNACL_FP32_ARG_MIN_MAX_H_ + +#include "nnacl/nnacl_common.h" +#include "nnacl/arg_min_max_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void ArgMinMaxFp32(const float *input, void *output, float *output_value, const int *in_shape, + const ArgMinMaxParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_ARG_MIN_MAX_H_ diff --git a/mindspore/lite/nnacl/fp32/arithmetic_compare_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_compare_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/arithmetic_compare_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_compare_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_compare_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_compare_fp32.h new file mode 100644 index 0000000000..61874686cd --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_compare_fp32.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_ARITHMETIC_COMPARE_H_ +#define MINDSPORE_NNACL_ARITHMETIC_COMPARE_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ElementEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementNotEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementNotEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementLessFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementLessInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementLessEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementLessEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementGreaterFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementGreaterInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); + +int ElementGreaterEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); +int ElementGreaterEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_ARITHMETIC_COMPARE_H_ diff --git a/mindspore/lite/nnacl/fp32/arithmetic_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/arithmetic_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_fp32.h 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_fp32.h new file mode 100644 index 0000000000..9b1c64b6ad --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_fp32.h @@ -0,0 +1,71 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_ARITHMETIC_H_ +#define MINDSPORE_NNACL_ARITHMETIC_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/fp32/add_fp32.h" +#include "nnacl/fp32/mul_fp32.h" +#include "nnacl/fp32/div_fp32.h" +#include "nnacl/fp32/sub_fp32.h" +#include "nnacl/fp32/squared_difference.h" + +#ifdef __cplusplus +extern "C" { +#endif +void TileOneDimensionFp32(const float *inData, float *outData, int dim, size_t ndim, const int *inShape, + const int *inStrides, const int *outStrides, const int *multiple); +void TileDimensionsFp32(const float *data0, const float *data1, float *tile_data0, float *tile_data1, + ArithmeticParameter *param); +/* logical and */ +int ElementLogicalAnd(const float *in0, const float *in1, float *out, int size); +int ElementLogicalAndInt(const int *in0, const int *in1, int *out, int size); +int ElementLogicalAndBool(const bool *in0, const bool *in1, bool *out, int size); + +/* logical or */ +int ElementLogicalOr(const float *in0, const float *in1, float *out, int size); +int 
ElementLogicalOrBool(const bool *in0, const bool *in1, bool *out, int size); + +/* max min */ +int ElementMaximum(const float *in0, const float *in1, float *out, int size); +int ElementMinimum(const float *in0, const float *in1, float *out, int size); +int ElementMaximumInt(const int *in0, const int *in1, int *out, int size); +int ElementMinimumInt(const int *input0, const int *input1, int *output, const int element_size); + +/* floor div */ +int ElementFloorDiv(const float *in0, const float *in1, float *out, int size); +int ElementFloorDivInt(const int *in0, const int *in1, int *out, int size); + +/* floor mod */ +int ElementFloorMod(const float *in0, const float *in1, float *out, int size); +int ElementFloorModInt(const int *in0, const int *in1, int *out, int size); + +/* mod */ +int ElementMod(const float *in0, const float *in1, float *out, int size); +int ElementModInt(const int *in0, const int *in1, int *out, int size); +int ElementOptMod(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptModInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_ARITHMETIC_H_ diff --git a/mindspore/lite/nnacl/fp32/arithmetic_self_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_self_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/arithmetic_self_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_self_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_self_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_self_fp32.h new file mode 100644 index 0000000000..5ab6a86de5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/arithmetic_self_fp32.h @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_ARITHMETIC_SELF_H_ +#define MINDSPORE_NNACL_ARITHMETIC_SELF_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ElementAbs(const float *input, float *output, const int element_size); + +int ElementCos(const float *input, float *output, const int element_size); + +int ElementLog(const float *input, float *output, const int element_size); + +int ElementSquare(const float *input, float *output, const int element_size); + +int ElementSqrt(const float *input, float *output, const int element_size); + +int ElementRsqrt(const float *input, float *output, const int element_size); + +int ElementSin(const float *input, float *output, const int element_size); + +int ElementLogicalNot(const float *input, float *output, const int element_size); + +int ElementLogicalNotBool(const bool *input, bool *output, const int element_size); + +int ElementRound(const float *input, float *output, const int element_size); + +int ElementFloor(const float *input, float *output, const int element_size); + +int ElementCeil(const float *input, float *output, const int number); + +int ElementNegative(const float *input, float *output, const int element_size); + +int ElementReciprocal(const float *input, float *output, const int element_size); + +int ElementErf(const float *input, float *output, const int element_size); +#ifdef __cplusplus +} +#endif + +#endif // 
MINDSPORE_NNACL_ARITHMETIC_SELF_H_ diff --git a/mindspore/lite/nnacl/fp32/batchnorm_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/batchnorm_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/batchnorm_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/batchnorm_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/batchnorm_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/batchnorm_fp32.h new file mode 100644 index 0000000000..ba8fecba39 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/batchnorm_fp32.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_BATCHNORM_H_ +#define MINDSPORE_NNACL_FP32_BATCHNORM_H_ + +#include "nnacl/batchnorm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void BatchNormFp32(const void *input, const void *mean, const void *variance, const BatchNormParameter *param, + int task_id, void *output); +void FusedBatchNormFp32(const void *input, const void *scale, const void *offset, const void *mean, + const void *variance, const BatchNormParameter *param, int task_id, void *output); + +void FusedBatchNormFp32MeanVar(const float *input, float *run_mean, float *run_var, const BatchNormParameter *param, + float *save_mean, float *save_var); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FUSED_BATCHNORM_H_ diff --git a/mindspore/lite/nnacl/fp32/broadcast_to_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/broadcast_to_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/broadcast_to_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/broadcast_to_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/broadcast_to_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/broadcast_to_fp32.h new file mode 100644 index 0000000000..e59c0158c9 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/broadcast_to_fp32.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_BROADCAST_TO_FP32_H_ +#define MINDSPORE_NNACL_FP32_BROADCAST_TO_FP32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/broadcast_to_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int BroadcastTo(const float *input, BroadcastShapeInfo *shape_info, float *output); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_BROADCAST_TO_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/common_func_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/common_func_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/common_func_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/common_func_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/common_func_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/common_func_fp32.h new file mode 100644 index 0000000000..649850dcdb --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/common_func_fp32.h @@ -0,0 +1,105 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_COMMON_FUNC_H_ +#define MINDSPORE_NNACL_FP32_COMMON_FUNC_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" + +typedef struct ConvDwFp32BorderParam { + float *dst; + const float *src; + const float *weight; + const float *bias; + size_t height; + size_t width; + size_t in_kh_step; + size_t in_kw_step; + size_t kernel_w; + size_t relu; + size_t relu6; +} ConvDwFp32BorderParam; + +#ifdef __cplusplus +extern "C" { +#endif + +void PostConvFuncFp32C8(const float *c8_out_ptr, float *out_ptr, const float *bias_ptr, size_t output_channel, + size_t plane_size, size_t stride, size_t relu_type); +void PostConvFuncFp32C4(const float *c4_out_ptr, float *out_ptr, const float *bias_ptr, size_t output_channel, + size_t plane_size, size_t plane_stride, size_t relu_type); + +void WinogradTransLeft(const float *S, const float *B, float *M, size_t w, size_t h, size_t k, size_t length); +void WinogradTransRight(const float *S, const float *B, float *M, size_t w, size_t h, size_t k, size_t length); + +#if defined(ENABLE_ARM) || defined(ENABLE_SSE) +void ConvDwFp32Center(float *dst, const float *src, const float *weight, const float *bias, size_t height, size_t width, + size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, + size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, size_t relu, size_t relu6); +#ifdef ENABLE_AVX +void ConvDwFp32Border(ConvDwFp32BorderParam *param); +#else +void ConvDwFp32Border(float *dst, const float *src, const float *weight, const float *bias, size_t height, size_t width, + size_t in_kh_step, size_t in_kw_step, size_t kernel_w, size_t relu, size_t relu6); +#endif +void DeconvDwFp32Center(float *dst, const float *src, const float *weight, size_t height, size_t width, size_t kernel_h, + size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, size_t in_sw_step, + size_t in_kh_step, size_t in_kw_step); +void PostFuncBiasReluC8(float 
*dst, const float *src, const float *bias, size_t oc8div, size_t oc8mod, + size_t plane_size, size_t stride, size_t relu_type); +void ConvDwFp32Row(float *output_ptr, const float *input_ptr, const float *weight_ptr, size_t num_pixels, + size_t output_channel, size_t input_step); +void PostFuncBiasReluC4(float *dst, const float *src, const float *bias, size_t oc4div, size_t oc4mod, + size_t plane_size, size_t plane_stride, size_t relu_type); +#endif + +#ifdef ENABLE_ARM64 +void BiasAdd(const float *bias, float *data, size_t oc4, size_t plan_size); +void BiasAddRelu6(const float *bias, float *data, size_t oc4, size_t plan_size); +void BiasAddRelu(const float *bias, float *data, size_t oc4, size_t plan_size); +void Relu6(float *data, size_t element4); +void Relu(float *data, size_t element4); + +void DeconvDwFp32Border(float *dst, const float *src, const float *weight, size_t height, size_t width, + size_t in_kh_step, size_t in_kw_step, size_t kernel_w); + +void ConvSwFp32Center(float *dst, const float *src, const float *weight, const float *bias, size_t height, size_t width, + size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t ic4, + size_t in_sh_step, size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, size_t relu, + size_t relu6); + +void ConvDw3x3Stride1(float *output, const float *buffer, const float *weight, const float *bias, int col_size, + int row_size, int channel, int output_h, int output_w, size_t relu, size_t relu6); + +void ConvDw3x3Stride2(float *output, const float *buffer, const float *weight, const float *bias, int col_size, + int row_size, int channel, int output_h, int output_w, size_t relu, size_t relu6); + +void ConvDw3x3Corner(float *dst, const float *src, const float *weight, const float *bias, int in_kh_step, + int in_kw_step, int channel, size_t relu, size_t relu6); + +void ConvDw3x3Vertical(float *dst, const float *src, const float *weight, const float *bias, int in_kh_step, + int in_kw_step, int channel, 
size_t relu, size_t relu6); + +void ConvDw3x3Horizontal(float *dst, const float *src, const float *weight, const float *bias, int in_kh_step, + int in_kw_step, int channel, size_t relu, size_t relu6); +#endif + +#ifdef __cplusplus +} +#endif +#endif /* MINDSPORE_NNACL_FP32_COMMON_FUNC_H_ */ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/constant_of_shape_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/constant_of_shape_fp32.h new file mode 100644 index 0000000000..6c607cf503 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/constant_of_shape_fp32.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_CONSTANT_OF_SHAPE_FP32_H_ +#define MINDSPORE_NNACL_FP32_CONSTANT_OF_SHAPE_FP32_H_ +#include +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/constant_of_shape_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +inline int ConstantOfShapeInt32(int32_t *output, int start, int end, int32_t value) { + for (int i = start; i < end; i++) { + output[i] = value; + } + return NNACL_OK; +} + +inline int ConstantOfShapeFp32(float *output, int start, int end, float value) { + for (int i = start; i < end; i++) { + output[i] = value; + } + return NNACL_OK; +} + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_CONSTANT_OF_SHAPE_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/conv_common_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_common_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/conv_common_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_common_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_common_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_common_fp32.h new file mode 100644 index 0000000000..c1cec8c959 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_common_fp32.h @@ -0,0 +1,40 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_CONV_COMMON_H_ +#define MINDSPORE_NNACL_FP32_CONV_COMMON_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// fp32 convolution common (im2col+gemm) +void ConvFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, + float *col_major_input, float *output_data, int task_id, const ConvParameter *conv_param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_CONV_COMMON_H_ diff --git a/mindspore/lite/nnacl/fp32/conv_depthwise_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/conv_depthwise_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.h new file mode 100644 index 0000000000..5a8024805b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.h @@ -0,0 +1,91 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_CONV_DEPTHWISE_H_ +#define MINDSPORE_NNACL_FP32_CONV_DEPTHWISE_H_ + +#include "nnacl/conv_parameter.h" + +#ifndef ENABLE_ARM64 +void DepthwiseCenter(float *dst, const float *src, const float *weight, const float *bias, int height, int width, + int kernel_h, int kernel_w, int out_h_step, int block_channel, int in_sh_step, int in_sw_step, + int in_kh_step, int in_kw_step, bool is_relu, bool is_relu6); +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +void ConvDw(float *output_data, const float *input_data, const float *weight_data, const float *bias_data, + const ConvParameter *conv_param, int task_id); + +void InitSlidingParam(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void InitSlidingParamConv(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void AppendSlidingParamConv(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void InitSlidingParamConvDw(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void AppendSlidingParamConvDw(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); + +void ConvDwSWFp32(float *output_data, const float *input_data, const float *weight_data, const float *bias_data, + const ConvParameter *conv_param, const SlidingWindowParam *sliding, int task_id); + +bool CheckConvDwUse3X3(const ConvParameter *conv_param); + +bool CheckConvDwUseIndirectBuffer(const ConvParameter *conv_param); + +void ConvDwInitIndirection(float **indirect_buffer, float *src, float *zero_ptr, const ConvParameter *conv_param, + int step_h, int step_w); + +#ifdef ENABLE_ARM64 +void ConvDwFp32Indirect3x3(float *output, float **input, const float *weights, const float *bias, int channels, + int output_width, size_t input_stride, size_t relu, size_t relu6); + +void ConvDwFp32Indirect5x5(float *output, float **input, const float *weights, const float *bias, int channels, + int output_width, size_t 
input_stride, size_t relu, size_t relu6); +#endif + +#ifdef ENABLE_AVX +void ConvDwFp32Avx3x3(float *output, float **input, const float *weights, const float *bias, size_t channels, + size_t output_width, size_t input_stride, size_t relu, size_t relu6); + +void ConvDwFp32Avx5x5(float *output, float **input, const float *weights, const float *bias, size_t channels, + size_t output_width, size_t input_stride, size_t relu, size_t relu6); +#endif + +#if defined(ENABLE_ARM) || (defined(ENABLE_SSE) && !defined(ENABLE_AVX)) +void ConvDw3x3(float *output_data, float *buffer, const float *input_data, const float *weight_data, + const float *bias_data, const ConvParameter *conv_param, int start_oh, int end_oh); + +bool CheckConvDw1DWinograd(const ConvParameter *conv_param, int thread_num); +#endif + +void ConvDwFp32IndirectRow(float *output, float **input, const float *weights, const float *bias, int channels, + int output_width, int input_stride, bool relu, bool relu6, int kernel); + +void ConvDwIndirection(float *output_data, float **indirect_buffer, const float *weight_data, const float *bias_data, + float *zero_ptr, const ConvParameter *conv_param, int task_id); + +void DeconvDwSWFp32(float *output_data, const float *input_data, const float *weight_data, const float *bias_data, + const ConvParameter *conv_param, const SlidingWindowParam *sliding, int task_id); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_CONV_DEPTHWISE_H_ diff --git a/mindspore/lite/nnacl/fp32/conv_winograd_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_winograd_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/conv_winograd_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_winograd_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_winograd_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_winograd_fp32.h new file mode 100644 index 0000000000..2b84d3e77a --- /dev/null 
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_winograd_fp32.h @@ -0,0 +1,44 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_CONV_WINOGRAD_H_ +#define MINDSPORE_NNACL_FP32_CONV_WINOGRAD_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/fp32/winograd_utils.h" +#include "nnacl/fp32/conv_depthwise_fp32.h" + +typedef float *TmpBufferAddress; + +#ifdef __cplusplus +extern "C" { +#endif + +// fp32 convolution winograd +void ConvWinogardFp32(const float *input_data, const float *trans_weight, const float *bias_data, float *output_data, + TmpBufferAddress *buffer_list, int task_id, const ConvParameter *conv_param, + InputTransFunc in_func, OutputTransFunc out_func); +#ifdef __cplusplus +} +#endif + +#endif  // MINDSPORE_NNACL_FP32_CONV_WINOGRAD_H_ diff --git a/mindspore/lite/nnacl/fp32/crop_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/crop_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/crop_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/crop_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/crop_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/crop_fp32.h new file mode 100644 index 0000000000..816ea7899a --- /dev/null +++ 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/crop_fp32.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_CROP_H_ +#define MINDSPORE_NNACL_FP32_CROP_H_ +#include "nnacl/op_base.h" +#include "nnacl/crop_parameter.h" + +#define CROP_OFFSET_MAX_SIZE 4 + +#ifdef __cplusplus +extern "C" { +#endif +void Crop4D(const float *input, float *output, const int *in_shape, const int *out_shape, + const CropParameter *crop_param, int thread_id); +void Crop4DNoParallel(const float *input, float *output, const int *in_shape, const int *out_shape, + const CropParameter *crop_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_CROP_H_ diff --git a/mindspore/lite/nnacl/fp32/deconv_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/deconv_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_fp32.h new file mode 100644 index 0000000000..06ba80c149 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_fp32.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_DECONV_H_ +#define MINDSPORE_NNACL_FP32_DECONV_H_ + +#include <string.h> +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/errorcode.h" +#include "nnacl/fp32/common_func_fp32.h" +#include "nnacl/base/minimal_filtering_generator.h" + +#ifdef __cplusplus +extern "C" { +#endif +void PackDeConvWeightFp32(const float *weight, float *dst, int input_channel, int output_channel, int plane); +void DeConvPostFp32C8(const float *src, float *tmp_out, const float *bias, float *dst, int output_channel, + const ConvParameter *conv_param); +#ifdef __cplusplus +} +#endif + +#endif  // MINDSPORE_NNACL_FP32_DECONV_H_ diff --git a/mindspore/lite/nnacl/fp32/deconv_winograd_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_winograd_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/deconv_winograd_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_winograd_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_winograd_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_winograd_fp32.h new file mode 100644 index 0000000000..31f7be517e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/deconv_winograd_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_DECONV_WINOGRAD_H_ +#define MINDSPORE_NNACL_FP32_DECONV_WINOGRAD_H_ + +#include <string.h> +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/errorcode.h" +#include "nnacl/fp32/common_func_fp32.h" +#include "nnacl/base/minimal_filtering_generator.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PackDeConvWgDataFp32(const float *nhwc_weight, DeConvComputeUnit *unit, const ConvParameter *conv_param, + const DeConvParam *deconv_param); +void DeconvWg(const float *nhwc_input_, float *tile_in, float *tile_out, int start_index, int calculate_count, + const ConvParameter *conv_param, DeConvParam *deconv_param, int task_id); +void DeconvWgPost(const float *tile_out, float *nc4hw4_output, const ConvParameter *conv_param, + const DeConvParam *deconv_param, int calculate_count, int tile_index); +void TiledC4MatmulFp32(float *dst, const float *src, const float *weight, size_t ic4, size_t cal_num, size_t oc4); + +#ifdef __cplusplus +} +#endif + +#endif  // MINDSPORE_NNACL_FP32_DECONV_WINOGRAD_H_ diff --git a/mindspore/lite/nnacl/fp32/detection_post_process_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/detection_post_process_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/detection_post_process_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/detection_post_process_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/detection_post_process_fp32.h 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/detection_post_process_fp32.h new file mode 100644 index 0000000000..56442dc87f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/detection_post_process_fp32.h @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_DETECTION_POST_PROCESS_H_ +#define MINDSPORE_NNACL_FP32_DETECTION_POST_PROCESS_H_ + +#include "nnacl/op_base.h" +#include "nnacl/detection_post_process_parameter.h" + +typedef struct { + float y; + float x; + float h; + float w; +} BboxCenter; + +typedef struct { + float ymin; + float xmin; + float ymax; + float xmax; +} BboxCorner; + +#ifdef __cplusplus +extern "C" { +#endif +int DecodeBoxes(int num_boxes, const float *input_boxes, const float *anchors, + const DetectionPostProcessParameter *param); + +int NmsMultiClassesFastCore(const int num_boxes, const int num_classes_with_bg, const float *input_scores, + void (*)(const float *, int *, int, int), const DetectionPostProcessParameter *param, + const int task_id, const int thread_num); + +int DetectionPostProcessFast(const int num_boxes, const int num_classes_with_bg, const float *input_scores, + const float *decoded_boxes, float *output_boxes, float *output_classes, + float *output_scores, float *output_num, void (*)(const float *, int *, int, int), + const DetectionPostProcessParameter *param); + +int 
DetectionPostProcessRegular(const int num_boxes, const int num_classes_with_bg, const float *input_scores, + float *output_boxes, float *output_classes, float *output_scores, float *output_num, + void (*)(const float *, int *, int, int), const DetectionPostProcessParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_DETECTION_POST_PROCESS_H_ diff --git a/mindspore/lite/nnacl/fp32/div_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/div_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/div_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/div_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/div_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/div_fp32.h new file mode 100644 index 0000000000..a74c1cf859 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/div_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_DIV_H_ +#define MINDSPORE_NNACL_FP32_DIV_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ElementDiv(const float *in0, const float *in1, float *out, int size); +int ElementDivRelu(const float *in0, const float *in1, float *out, int size); +int ElementDivRelu6(const float *in0, const float *in1, float *out, int size); +int ElementOptDiv(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptDivRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptDivRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptDivInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); +int BroadcastDiv(const float *in0, const float *in1, float *tile_in0, float *tile_in1, float *out, int size, + ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif  // MINDSPORE_NNACL_FP32_DIV_H_ diff --git a/mindspore/lite/nnacl/fp32/elu_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/elu_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/elu_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/elu_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/elu_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/elu_fp32.h new file mode 100644 index 0000000000..c26f8f5029 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/elu_fp32.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_ELU_H_ +#define MINDSPORE_NNACL_FP32_ELU_H_ + +#include "nnacl/op_base.h" + +typedef struct EluParameter { + OpParameter op_parameter_; + // primitive parameter + float alpha_; + + // shape correlative + int in_size_; +} EluParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int Elu(const float *input_data, float *output_data, const EluParameter *parameter, int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_ELU_H_ diff --git a/mindspore/lite/nnacl/fp32/embedding_lookup_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/embedding_lookup_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/embedding_lookup_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/embedding_lookup_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/embedding_lookup_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/embedding_lookup_fp32.h new file mode 100644 index 0000000000..40b3b4c7d7 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/embedding_lookup_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_EMBEDDING_LOOKUP_H_ +#define MINDSPORE_NNACL_FP32_EMBEDDING_LOOKUP_H_ + +#include "nnacl/op_base.h" + +typedef struct EmbeddingLookupParameter { + OpParameter op_parameter_; + // primitive parameter + float max_norm_; + + // shape correlative + bool *is_regulated_; + int ids_size_; + int layer_size_; + int layer_num_; +} EmbeddingLookupParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int EmbeddingLookup(float *input_data, const int *ids, float *output_data, const EmbeddingLookupParameter *parameter, + int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_EMBEDDING_LOOKUP_H_ diff --git a/mindspore/lite/nnacl/fp32/exp_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/exp_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.h new file mode 100644 index 0000000000..b90056e466 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/exp_fp32.h @@ -0,0 +1,103 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_EXP_H_ +#define MINDSPORE_NNACL_FP32_EXP_H_ + +#include "nnacl/op_base.h" + +typedef struct ExpParameter { + // Primitive parameter + OpParameter op_parameter_; + float base_; + float scale_; + float shift_; + // other parameter + int thread_num_; + float in_scale_; + float out_scale_; + int element_num_; +} ExpParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int Exp(const float *input_data, float *output_data, const ExpParameter *parameter, int task_id); +void ExpFp32(const float *src, float *dst, int num); + +#if defined(ENABLE_ARM) || defined(ENABLE_SSE) +static inline void simd_exp(MS_FLOAT32X4 input, float *dst) { + static MS_FLOAT32X4 maxv = {88.0f, 88.0f, 88.0f, 88.0f}; + static MS_FLOAT32X4 minv = {-88.0f, -88.0f, -88.0f, -88.0f}; + static MS_FLOAT32X4 param[] = {{0.693147f, 0.693147f, 0.693147f, 0.693147f}, + {1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120}, + {1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24}, + {1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6}, + {0.5f, 0.5f, 0.5f, 0.5f}, + {1.0f, 1.0f, 1.0f, 1.0f}}; + + input = MS_MAXQ_F32(minv, MS_MINQ_F32(input, maxv)); + MS_INT32X4 integer = MS_CVTQPS_EPI32(input / param[0]); + MS_FLOAT32X4 decimal = input - MS_CVTQEPI32_PS(integer) * param[0]; + MS_INT32X4 int_exp = MS_SLLIQ_EPI32(MS_ADDQ_EPI32(integer, MS_MOVQ_EPI32(127)), 23); + MS_FLOAT32X4 decimal_exp = + param[5] + + decimal * (param[5] + decimal * (param[4] + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); + MS_STQ_F32(dst, decimal_exp * MS_CAST_F32_S32(int_exp)); +} +#endif + 
+#if defined(ENABLE_AVX) +static inline void simd_exp_avx(MS_FLOAT32X8 input, float *dst) { + static MS_FLOAT32X8 maxv = {88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f}; + static MS_FLOAT32X8 minv = {-88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f}; + static MS_FLOAT32X8 param[] = { + {0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f}, + {1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120}, + {1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24}, + {1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6}, + {0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f}, + {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}; + input = MS_MAX256_F32(minv, MS_MIN256_F32(input, maxv)); + MS_INT32X8 integer = MS_CVT256PS_EPI32(input / param[0]); + MS_FLOAT32X8 decimal = input - MS_CVT256EPI32_PS(integer) * param[0]; + MS_INT32X8 int_exp = MS_SLLI256_EPI32(MS_ADD256_EPI32(integer, MS_MOV256_EPI32(127)), 23); + MS_FLOAT32X8 decimal_exp = + param[5] + + decimal * (param[5] + decimal * (param[4] + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); + MS_ST256_F32(dst, decimal_exp * MS_CAST256_F32_S32(int_exp)); +} +#endif + +static inline void single_exp(float src, float *dst) { + typedef union { + float f; + int i; + } fi; + static float param[] = {0.693147f, 1.0f / 120, 1.0f / 24, 1.0f / 6, 1.0f / 2, 1.0f}; // log(2.0f) + src = MSMAX(-88.0f, MSMIN(88.0f, src)); + int integer = src / param[0]; + float decimal = src - integer * param[0]; + fi int_exp = {.i = (integer + 127) << 23}; + float decimal_exp = + 1.0f + decimal * (1.0f + decimal * (0.5f + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); + *dst = int_exp.f * decimal_exp; +} +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_EXP_H_ diff --git a/mindspore/lite/nnacl/fp32/gatherNd_fp32.c 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gatherNd_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/gatherNd_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gatherNd_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gatherNd_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gatherNd_fp32.h new file mode 100644 index 0000000000..1c165ea0ba --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gatherNd_fp32.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_GATHERND_H_ +#define MINDSPORE_NNACL_GATHERND_H_ + +#include "nnacl/op_base.h" + +typedef struct GatherNdParameter { + // Primitive parameter + OpParameter op_parameter_; +} GatherNdParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int GatherNd(const float *input, float *output, const int *in_offset, int area, int count); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_GATHERND_H_ diff --git a/mindspore/lite/nnacl/fp32/gru_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gru_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/gru_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gru_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gru_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gru_fp32.h new file mode 100644 index 0000000000..7beb77ed29 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/gru_fp32.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_GRU_FP32_H_ +#define MINDSPORE_NNACL_FP32_GRU_FP32_H_ +#include "nnacl/gru_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Gru(float *output, const float *input, const float *weight_g, const float *weight_r, const float *input_bias, + const float *state_bias, float *hidden_state, float *buffer[4], int check_seq_len, + const GruParameter *gru_parm); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_GRU_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/instance_norm_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/instance_norm_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/instance_norm_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/instance_norm_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/instance_norm_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/instance_norm_fp32.h new file mode 100644 index 0000000000..b0bf3bf64c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/instance_norm_fp32.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_INSTANCE_NORM_H_ +#define MINDSPORE_NNACL_FP32_INSTANCE_NORM_H_ + +#include "nnacl/op_base.h" +#include "nnacl/instance_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int InstanceNorm(const float *src_data, float *dst_data, const float *gamma_data, const float *beta_data, + const InstanceNormParameter *param, size_t task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_INSTANCE_NORM_H_ diff --git a/mindspore/lite/nnacl/fp32/invert_permutation_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/invert_permutation_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/invert_permutation_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/invert_permutation_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/invert_permutation_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/invert_permutation_fp32.h new file mode 100644 index 0000000000..dc44b14704 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/invert_permutation_fp32.h @@ -0,0 +1,27 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INVERT_PERMUTATION_FP32_H_ +#define MINDSPORE_NNACL_INVERT_PERMUTATION_FP32_H_ + +#ifdef __cplusplus +extern "C" { +#endif +void InvertPermutation(const int *input, int *output, int num); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INVERT_PERMUTATION_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/l2_norm_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/l2_norm_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/l2_norm_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/l2_norm_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/l2_norm_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/l2_norm_fp32.h new file mode 100644 index 0000000000..efca13b717 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/l2_norm_fp32.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_L2NORM_FP32_H_ +#define MINDSPORE_NNACL_FP32_L2NORM_FP32_H_ + +#include "nnacl/l2_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int CalcThreadSquareSum(const float *input_ptr, float *sum, int begin, int end); +int ThreadDivSqrtSum(const float *input_ptr, float *output_ptr, const L2NormParameter *param, const float sqrt_sum, + const int begin, const int end); +int ThreadTrailingAxis(const float *input_ptr, float *output_ptr, const L2NormParameter *param, const int begin, + const int end); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_L2NORM_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/layer_norm_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/layer_norm_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/layer_norm_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/layer_norm_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/layer_norm_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/layer_norm_fp32.h new file mode 100644 index 0000000000..fcbdcf16ef --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/layer_norm_fp32.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_LAYER_NORM_FP32_H_ +#define MINDSPORE_NNACL_FP32_LAYER_NORM_FP32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/layer_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LayerNorm(const float *src_data, const float *gamma_data, const float *beta_data, float *dst_data, float *out_mean, + float *out_deno, LayerNormParameter *param, size_t task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_LAYER_NORM_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/local_response_norm_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/local_response_norm_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/local_response_norm_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/local_response_norm_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/local_response_norm_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/local_response_norm_fp32.h new file mode 100644 index 0000000000..b610aabfdc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/local_response_norm_fp32.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_LOCAL_RESPONSE_NORM_H_ +#define MINDSPORE_NNACL_LOCAL_RESPONSE_NORM_H_ + +#include "nnacl/op_base.h" + +typedef struct LocalResponseNormParameter { + // Primitive parameter + OpParameter op_parameter_; + int depth_radius_; + float bias_; + float alpha_; + float beta_; +} LocalResponseNormParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int LocalResponseNorm(const float *input_ptr, int out_size, int channel, float *output_ptr, + const LocalResponseNormParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_LOCAL_RESPONSE_NORM_H_ diff --git a/mindspore/lite/nnacl/fp32/log_softmax_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/log_softmax_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/log_softmax_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/log_softmax_fp32.c diff --git a/mindspore/lite/nnacl/fp32/log_softmax_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/log_softmax_fp32.h similarity index 100% rename from mindspore/lite/nnacl/fp32/log_softmax_fp32.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/log_softmax_fp32.h diff --git a/mindspore/lite/nnacl/fp32/lstm_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/lstm_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/lstm_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/lstm_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/lstm_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/lstm_fp32.h new file mode 100644 index 0000000000..f1d88bd8b5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/lstm_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_LSTM_H_ +#define MINDSPORE_NNACL_FP32_LSTM_H_ + +#include "nnacl/lstm_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif +void PackLstmWeight(float *dst, const float *src, int batch, int deep, int col, int col_align); + +void PackLstmBias(float *dst, const float *src, int batch, int col, int col_align, bool is_bidirectional); + +void PackLstmInput(const float *src, float *dst, int row, int deep); + +void LstmMatMul(float *c, const float *a, const float *b, const float *bias, int row, int deep, int col, bool is_vec); + +void ElementMulAcc(const float *input0, const float *input1, float *output, int element_size); + +int ElementOptMulAcc(const float *input0, const float input1, float *output, const int element_size); + +void Lstm(float *output, const float *input, const float *weight_i, const float *weight_h, const float *input_bias, + const float *state_bias, float *hidden_state, float *cell_state, float *buffer[6], + const LstmParameter *lstm_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_LSTM_H_ diff --git a/mindspore/lite/nnacl/fp32/matmul_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/matmul_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/matmul_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/matmul_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/matmul_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/matmul_fp32.h new file mode 100644 index 0000000000..b7bd1912ee --- /dev/null 
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/matmul_fp32.h @@ -0,0 +1,83 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_MATMUL_H_ +#define MINDSPORE_NNACL_FP32_MATMUL_H_ + +#include +#include +#include "nnacl/errorcode.h" +#include "nnacl/matmul_parameter.h" +#include "nnacl/op_base.h" + +#define ADD_BIAS(value, bias, c) \ + if (bias != NULL) value = value + bias[c]; + +#define DO_RELU(value, act_type) \ + if (act_type == ActType_Relu) value = MSMAX(0.0f, value); + +#define DO_RELU6(value, act_type) \ + if (act_type == ActType_Relu6) value = MSMIN(6.0f, value); \ + if (act_type == ActType_Relu6) value = MSMAX(0.0f, value); + +#ifdef __cplusplus +extern "C" { +#endif +void MatMulOpt(const float *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row, + int col, size_t stride, int out_type); +void MatVecMulFp32(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int col); + +void RowMajor2ColMajor(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Row4Major(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Row6Major(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Row8Major(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Row12Major(const float *src_ptr, float *dst_ptr, int row, int col); +void 
RowMajor2Row16Major(const float *src_ptr, float *dst_ptr, int row, int col); +void RowMajor2Col4Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); +void RowMajor2Col6Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); +void RowMajor2Col8Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); +void RowMajor2Col12Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); +void RowMajor2Col16Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); + +#ifdef ENABLE_ARM64 +void MatmulFloatNeon64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, size_t stride, size_t writeNhwc, size_t WriteWino); +void MatmulFloatNeon64Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, size_t stride, size_t write_mode); +#elif ENABLE_ARM32 +void MatmulFloatNeon32(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, int stride, size_t writeNhwc, size_t WriteWino); +void MatmulFloatNeon32Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, int stride, int write_mode); +void MatmulFloatNeon32Opt12x4(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, + int row, int col, int stride, int write_mode); +#elif ENABLE_SSE +#include +void MatmulFloatSse64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, int stride, size_t writeNhwc, size_t WriteWino); +void MatmulFloatSse64Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, + int col, int stride, int write_mode); +#ifdef ENABLE_AVX +void MatmulFloatAvxOpt(const float *a, const float *b, float *c, const float *bias, size_t act_type, size_t depth, + size_t row, size_t col, size_t stride, size_t write_mode); +#endif +#endif + 
+#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_MATMUL_H_ diff --git a/mindspore/lite/nnacl/fp32/mul_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/mul_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/mul_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/mul_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/mul_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/mul_fp32.h new file mode 100644 index 0000000000..7a6a54c605 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/mul_fp32.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_MUL_H_ +#define MINDSPORE_NNACL_FP32_MUL_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ElementMul(const float *in0, const float *in1, float *out, int size); +int ElementMulRelu(const float *in0, const float *in1, float *out, int size); +int ElementMulRelu6(const float *in0, const float *in1, float *out, int size); +int ElementMulInt(const int *in0, const int *in1, int *out, int size); +int ElementMulReluInt(const int *in0, const int *in1, int *out, int size); +int ElementMulRelu6Int(const int *in0, const int *in1, int *out, int size); +int ElementOptMul(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptMulRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptMulRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptMulInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); +int ElementOptMulReluInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); +int ElementOptMulRelu6Int(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); +int BroadcastMul(const float *in0, const float *in1, float *tile_in0, float *tile_in1, float *out, int size, + ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_MUL_H_ diff --git a/mindspore/lite/nnacl/fp32/one_hot_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/one_hot_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/one_hot_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/one_hot_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/one_hot_fp32.h 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/one_hot_fp32.h new file mode 100644 index 0000000000..5586c2c1d8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/one_hot_fp32.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_ONE_HOT_H_ +#define MINDSPORE_NNACL_FP32_ONE_HOT_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" + +typedef struct OneHotParameter { + // Primitive parameter + OpParameter op_parameter_; + int axis_; + // other parameter + int depth_; + float on_value_; + float off_value_; + int outer_size_; + int inner_size_; + bool support_neg_index_; // if true, support neg index in indices tensor; if false, set off_value on neg index. 
+} OneHotParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int OneHot(const int *indices, float *output, const OneHotParameter *one_hot_param, const int tid, + const int thread_num); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_ONE_HOT_H_ diff --git a/mindspore/lite/nnacl/fp32/pack_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pack_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/pack_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pack_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pack_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pack_fp32.h new file mode 100644 index 0000000000..af43b80b08 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pack_fp32.h @@ -0,0 +1,70 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_PACK_H_ +#define MINDSPORE_NNACL_FP32_PACK_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void PackHWCToWHC(const float *src, float *dst, int height, int width, int channel); +void PackNHWCToNC4HW4Fp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNCHWToNC4HW4Fp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToNHWC4Fp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToNHWC8Fp32(const void *src, void *dst, int batch, int plane, int channel); +// Note: If not multithreaded, please set task_id = 0 and thread_count = 0; +void PackNHWCToNCHWFp32(const void *src, void *dst, int batch, int plane, int channel, int task_id, int thread_count); +void PackNCHWToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel, int task_id, int thread_count); +void PackNHWC4ToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNC4HW4ToNHWC4Fp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNC4HW4ToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToC8HWN8Fp32(const void *src, void *dst, int batch, int plane, int channel); + +void PackWeightKHWToHWKFp32(const void *src, void *dst, int plane, int channel); +void PackDepthwiseIndirectWeightC4Fp32(const void *src, void *dst, int height, int width, int channel); +void PackDepthwiseIndirectWeightC8Fp32(const void *src, void *dst, int height, int width, int channel); +void Im2ColPackUnitFp32(const float *input_data, const ConvParameter *conv_param, float *packed_input, int real_cal_num, + int block_index); + +#if defined(ENABLE_ARM) || (defined(ENABLE_SSE) && !defined(ENABLE_AVX)) +void PackWeightConvDw3x3Fp32(const void *src, void *dst, int channel); +#endif + +// Transpose 8X8 Fp32 block data +typedef void 
 (*Transpose8X8Fp32Func)(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#ifdef ENABLE_ARM64 +void Transpose8X8Fp32Arm64(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#endif +#ifdef ENABLE_ARM32 +void Transpose8X8Fp32Arm32(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#endif +#ifdef ENABLE_AVX +void Transpose8X8Fp32Avx(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#endif +#if defined(ENABLE_SSE) && !defined(ENABLE_AVX) +void Transpose8X8Fp32Sse(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); +#endif + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_PACK_H_ diff --git a/mindspore/lite/nnacl/fp32/pad_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pad_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/pad_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pad_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pad_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pad_fp32.h new file mode 100644 index 0000000000..d353477a1c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pad_fp32.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_PAD_H_ +#define MINDSPORE_NNACL_FP32_PAD_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include +#include +#include "nnacl/op_base.h" +#include "nnacl/pad_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Pad(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, + const int *paddings, int tid, int thread_num); +void MirrorPad(const float *input_data, float *output_data, const int *input_shape, const PadParameter *pad_param, + int begin, int end); + +int TransOut2InputDimIndex(int out_dim_index, int left_pad, int in_dim, int offset); +int GetInputFlattenIndex(int out_flatten_index, const int *input_shape, const PadParameter *pad_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_PAD_H_ diff --git a/mindspore/lite/nnacl/fp32/pooling_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pooling_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/pooling_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pooling_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pooling_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pooling_fp32.h new file mode 100644 index 0000000000..868afdbb80 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/pooling_fp32.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_POOLING_H_ +#define MINDSPORE_NNACL_FP32_POOLING_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/pooling_parameter.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int AvgPooling(const float *input_ptr, float *output_ptr, const PoolingParameter *pooling_param, int task_id, + float minf, float maxf); +void MaxPooling(const float *input_ptr, float *output_ptr, const PoolingParameter *pooling_param, int task_id, + float minf, float maxf); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_POOLING_H_ diff --git a/mindspore/lite/nnacl/fp32/power_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/power_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/power_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/power_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/power_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/power_fp32.h new file mode 100644 index 0000000000..928c3cd9de --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/power_fp32.h @@ -0,0 +1,53 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_POWER_FP32_H_ +#define MINDSPORE_NNACL_FP32_POWER_FP32_H_ + +#include <math.h> +#include "nnacl/op_base.h" +#include "nnacl/power_parameter.h" + +#if defined(ENABLE_ARM) || defined(ENABLE_AVX) || defined(ENABLE_SSE) +typedef MS_FLOAT32X4 (*PowerSimdFun)(MS_FLOAT32X4 x, const void *exponent); +#endif +typedef void (*PowerFun)(const float *, const float *, float *, int, float, float); +typedef float (*PowerScalarFun)(float x, const void *exponent); + +#ifdef __cplusplus +extern "C" { +#endif +static inline bool CheckInteger(float f) { return floorf(f) == f; } + +static inline float StdPowerScalar(float x, const void *exponent) { return powf(x, *(float *)exponent); } + +#if defined(ENABLE_ARM) || defined(ENABLE_AVX) || defined(ENABLE_SSE) +static inline MS_FLOAT32X4 StdPowerSimd(MS_FLOAT32X4 x, const void *exponent) { + MS_FLOAT32X4 result; + for (int i = 0; i < 4; ++i) { + result[i] = powf(x[i], *(float *)exponent); + } + return result; +} +#endif +int Power(const float *input, const float *exponent, float *output, int len, float scale, float shift, bool broadcast); +void PowerSingle(const float *input, const float *exponent, float *output, int len, float scale, float shift); +void PowerBroadCast(const float *input, const float *exponent, float *output, int len, float scale, float shift); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_POWER_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/prelu_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prelu_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/prelu_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prelu_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prelu_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prelu_fp32.h new file mode 100644 index 0000000000..9d6701e55f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prelu_fp32.h @@ -0,0 +1,32 @@ +/** 
+ * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_PRELU_H_ +#define MINDSPORE_NNACL_FP32_PRELU_H_ + +#include "nnacl/op_base.h" +#include "nnacl/prelu_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void PRelu(const float *input, float *output, float *slope, int start, int end, int channel); + +void PReluShareChannel(const float *input, float *output, float slope, int start, int end); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_PRELU_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prior_box_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prior_box_fp32.h new file mode 100644 index 0000000000..43116a1f0e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/prior_box_fp32.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_PRIOR_BOX_FP32_H_ +#define MINDSPORE_NNACL_FP32_PRIOR_BOX_FP32_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/prior_box_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +static int PriorBox(const float *input_data, float *output_data, const size_t size, const int tid, + const int thread_num) { + size_t unit_size = size / thread_num; + size_t copy_size = (tid == thread_num - 1) ? size - unit_size * tid : unit_size; + (void)memcpy(output_data + tid * unit_size, input_data + tid * unit_size, copy_size * sizeof(float)); + return NNACL_OK; +} +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_PRIOR_BOX_FP32_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/range_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/range_fp32.h new file mode 100644 index 0000000000..7cf8ba8a0c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/range_fp32.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_RANGE_H_ +#define MINDSPORE_NNACL_RANGE_H_ + +#include "nnacl/op_base.h" + +typedef struct RangeParameter { + // Primitive parameter + OpParameter op_parameter_; + int dType_; + int start_; + int limit_; + int delta_; +} RangeParameter; + +#ifdef __cplusplus +extern "C" { +#endif +inline void Range(float *output_ptr, float start, float delta, int nums) { + for (int i = 0; i < nums; ++i, start += delta) { + output_ptr[i] = start; + } +} + +inline void RangeInt(int *output_ptr, int start, int delta, int nums) { + for (int i = 0; i < nums; ++i, start += delta) { + output_ptr[i] = start; + } +} + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_RANGE_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/rank_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/rank_fp32.h new file mode 100644 index 0000000000..7740e045ed --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/rank_fp32.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_RANK_H_ +#define MINDSPORE_NNACL_RANK_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +inline void Rank(float *output, int rank) { + output[0] = (float)(rank); + return; +} +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_RANK_H_ diff --git a/mindspore/lite/nnacl/fp32/reduce_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reduce_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/reduce_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reduce_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reduce_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reduce_fp32.h new file mode 100644 index 0000000000..d772edf8b1 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reduce_fp32.h @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_REDUCE_H_ +#define MINDSPORE_NNACL_FP32_REDUCE_H_ +#include "nnacl/op_base.h" +#include "nnacl/reduce_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ReduceMean(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceMean(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceSum(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceSum(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceMax(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceMax(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceMin(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceMin(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceProd(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int IntReduceProd(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, + int thread_num); +int ReduceSumSquare(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, + int thread_num); +int ReduceAll(int outer_size, int inner_size, int axis_size, const bool *src_data, bool *dst_data, int tid, + int thread_num); + +#ifdef ENABLE_NNACL_INFER_SHAPE +int ReduceInferShape(int **in_shape, size_t *dim_size, int *out_shape, int *in_format, int *out_format, + int *in_datatype, int *out_datatype, OpParameter *param); +#endif +#ifdef __cplusplus +} +#endif + 
+#endif // MINDSPORE_NNACL_FP32_REDUCE_H_ diff --git a/mindspore/lite/nnacl/fp32/resize_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/resize_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/resize_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/resize_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/resize_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/resize_fp32.h new file mode 100644 index 0000000000..7c8d787c83 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/resize_fp32.h @@ -0,0 +1,69 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_RESIZE_H_ +#define MINDSPORE_NNACL_FP32_RESIZE_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +typedef float (*CalculateOriginalCoordinate)(int x_resized, int length_original, int length_resized); + +int PrepareResizeBilinear(const int *input_shape, const int *output_shape, CalculateOriginalCoordinate calculate, + int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, + float *x_left_weights); + +int PrepareResizeBicubic(const int *input_shape, const int *output_shape, CalculateOriginalCoordinate calculate, + int *y_tops, int *x_lefts, float *y_weights, float *x_weights, float cubic_coeff); + +int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, + const int *y_bottoms, const int *y_tops, const int *x_lefts, const int *x_rights, + const float *y_bottom_weights, const float *x_left_weights, float *line0, float *line1, + const int h_begin, const int h_end); + +int ResizeBicubic(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, + const int *y_tops, const int *x_lefts, const float *y_weights, const float *x_weights, + float *line_buffer, const int h_begin, const int h_end); + +int PrepareCropAndResizeBilinear(const int *input_shape, const float *boxes, const int *box_idx, + const int *output_shape, int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, + float *y_bottom_weights, float *x_left_weights); + +int CropAndResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, + const int *y_bottoms, const int *y_tops, const int *x_lefts, const int *x_rights, + const float *y_bottom_weights, const float *x_left_weights, float *line0, float *line1, + const int h_begin, const int h_end); + +int ResizeNearestNeighbor(const float *input_data, float *output_data, const int 
*input_shape, const int *output_shape, + CalculateOriginalCoordinate calculate, int coordinate_transform_mode, int tid, + int thread_num); + +float CalculateAsymmetric(int x_resized, int length_original, int length_resized); + +float CalculateAlignCorners(int x_resized, int length_original, int length_resized); + +float CalculateHalfPixel(int x_resized, int length_original, int length_resized); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_RESIZE_H_ diff --git a/mindspore/lite/nnacl/fp32/reverse_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/reverse_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_fp32.h new file mode 100644 index 0000000000..ccefb9bc19 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_fp32.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_REVERSE_H_ +#define MINDSPORE_NNACL_REVERSE_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#define REVERSE_SHAPE_MAX_SIZE 4 + +// For reverse. 
+typedef struct ReverseParameter { + OpParameter op_parameter_; + int axis_[REVERSE_SHAPE_MAX_SIZE]; + int num_axis_; +} ReverseParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int Reverse(const float *input, float *output, size_t elem_size, int *index); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_REVERSE_H_ diff --git a/mindspore/lite/nnacl/fp32/reverse_sequence_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_sequence_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/reverse_sequence_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_sequence_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_sequence_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_sequence_fp32.h new file mode 100644 index 0000000000..727659f787 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/reverse_sequence_fp32.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_REVERSE_SEQUENCE_H_ +#define MINDSPORE_NNACL_FP32_REVERSE_SEQUENCE_H_ + +#include +#include "nnacl/common_func.h" +#include "nnacl/op_base.h" +#include "nnacl/reverse_sequence_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void ReverseSequence(float *input0, const void *input1, float *output, ReverseSequenceParameter *para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_REVERSE_SEQUENCE_H_ diff --git a/mindspore/lite/nnacl/fp32/roi_pooling_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/roi_pooling_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/roi_pooling_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/roi_pooling_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/roi_pooling_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/roi_pooling_fp32.h new file mode 100644 index 0000000000..d7d1c79e41 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/roi_pooling_fp32.h @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_ROI_POOLING_H_ +#define MINDSPORE_NNACL_FP32_ROI_POOLING_H_ + +#include "nnacl/op_base.h" + +typedef struct ROIPoolingParameter { + // primitive parameter + OpParameter op_parameter_; + int pooledW_; + int pooledH_; + float scale_; + + // shape correlative + int in_strides_[DIMENSION_4D]; + int out_strides_[DIMENSION_4D]; + int ndim_; + int input_w_; + int input_h_; + int input_n_; + int input_c_; + int output_w_; + int output_h_; + int output_n_; + int output_c_; + + // other parameter + int thread_num_; +} ROIPoolingParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int ROIPooling(const float *in_ptr, float *out_ptr, const float *roi, float *max_c, int tid, + const ROIPoolingParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_ROI_POOLING_H_ diff --git a/mindspore/lite/nnacl/fp32/scale_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scale_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/scale_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scale_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scale_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scale_fp32.h new file mode 100644 index 0000000000..952ded821a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scale_fp32.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_SCALE_FP32_H_ +#define MINDSPORE_NNACL_SCALE_FP32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/scale.h" +#ifdef __cplusplus +extern "C" { +#endif +void DoScale(const float *in_data, float *out_data, const float *scale, const float *offset, int task_id, + const ScaleParameter *scale_param); +void DoScaleRelu(const float *in_data, float *out_data, const float *scale, const float *offset, int task_id, + const ScaleParameter *scale_param); +void DoScaleRelu6(const float *in_data, float *out_data, const float *scale, const float *offset, int task_id, + const ScaleParameter *scale_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_SCALE_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/scatter_nd_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scatter_nd_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/scatter_nd_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scatter_nd_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scatter_nd_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scatter_nd_fp32.h new file mode 100644 index 0000000000..626775fc5f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/scatter_nd_fp32.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_SCATTER_ND_FP32_H_ +#define MINDSPORE_NNACL_FP32_SCATTER_ND_FP32_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DoScatterND(float *output_ptr, const float *update, int *output_unit_offsets, int unit_size, int num_units); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_SCATTER_ND_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/softmax_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/softmax_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/softmax_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/softmax_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/softmax_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/softmax_fp32.h new file mode 100644 index 0000000000..70424aed5f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/softmax_fp32.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_SOFTMAX_H_ +#define MINDSPORE_NNACL_FP32_SOFTMAX_H_ + +#include "nnacl/op_base.h" +#include "nnacl/softmax_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif +void Softmax(const float *input_ptr, float *output_ptr, float *sum_data, const SoftmaxParameter *parameter); +void SoftmaxLastAxis(const float *src, float *dst, int batch, int channel); +void SoftmaxNorm(const float *src, float *dst, int batch, int channel); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_SOFTMAX_H_ diff --git a/mindspore/lite/nnacl/fp32/space_to_batch_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/space_to_batch_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/space_to_batch_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/space_to_batch_fp32.c diff --git a/mindspore/lite/nnacl/fp32/space_to_batch_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/space_to_batch_fp32.h similarity index 100% rename from mindspore/lite/nnacl/fp32/space_to_batch_fp32.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/space_to_batch_fp32.h diff --git a/mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sparse_to_dense_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sparse_to_dense_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sparse_to_dense_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sparse_to_dense_fp32.h new file mode 100644 index 0000000000..1e8f903413 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sparse_to_dense_fp32.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_SPARSETODENSE_H_ +#define MINDSPORE_NNACL_FP32_SPARSETODENSE_H_ + +#include "nnacl/sparse_to_dense_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void SparseToDense(int **sparse_indices_vect, const int *output_shape, const float *sparse_values, float default_value, + float *output, bool isScalar, int index_start, int index_end, int out_width); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_SPARSETODENSE_H_ diff --git a/mindspore/lite/nnacl/fp32/splice_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/splice_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/splice_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/splice_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/splice_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/splice_fp32.h new file mode 100644 index 0000000000..9b21b41ac3 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/splice_fp32.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_SPLICE_FP32_H_ +#define MINDSPORE_NNACL_FP32_SPLICE_FP32_H_ +#include +#include "nnacl/splice_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif + +void SpliceFp32(const float *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter, + float *dst_data, int dst_row, int dst_col); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FP32_SPLICE_FP32_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/squared_difference.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/squared_difference.c new file mode 100644 index 0000000000..d842e1a956 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/squared_difference.c @@ -0,0 +1,28 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SQUARED_DIFFERENCE_H_ +#define MINDSPORE_NNACL_SQUARED_DIFFERENCE_H_ + +#include "nnacl/fp32/squared_difference.h" +#include "nnacl/fp32/sub_fp32.h" +#include "nnacl/fp32/mul_fp32.h" + +int ElementSquaredDifference(const float *in0, const float *in1, float *out, int size) { + ElementSub(in0, in1, out, size); + return ElementMul(out, out, out, size); +} + +#endif // MINDSPORE_NNACL_SQUARED_DIFFERENCE_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/squared_difference.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/squared_difference.h new file mode 100644 index 0000000000..69b5c22b8a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/squared_difference.h @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SQUARED_DIFFERENCE_H_ +#define MINDSPORE_NNACL_SQUARED_DIFFERENCE_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Element Squared Difference */ +int ElementSquaredDifference(const float *in0, const float *in1, float *out, int size); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_SQUARED_DIFFERENCE_H_ diff --git a/mindspore/lite/nnacl/fp32/strided_slice_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/strided_slice_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/strided_slice_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/strided_slice_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/strided_slice_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/strided_slice_fp32.h new file mode 100644 index 0000000000..d6e8a6fcbd --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/strided_slice_fp32.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_STRIDED_SLICE_FP32_H_ +#define MINDSPORE_NNACL_FP32_STRIDED_SLICE_FP32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/strided_slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DoStridedSlice(const void *inputs, void *output, StridedSliceParameter *param); + +void FastStride(const uint8_t *input, uint8_t *output, int split_len, int stride, size_t outer, size_t inner_size, + size_t in_offset); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_STRIDED_SLICE_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/sub_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sub_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/sub_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sub_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sub_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sub_fp32.h new file mode 100644 index 0000000000..8811fd27cc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/sub_fp32.h @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SUB_FP32_H_ +#define MINDSPORE_NNACL_SUB_FP32_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/base/arithmetic_base.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ElementSub(const float *in0, const float *in1, float *out, int size); +int ElementSubInt(const int *in0, const int *in1, int *out, int size); +int ElementSubRelu(const float *in0, const float *in1, float *out, int size); +int ElementSubRelu6(const float *in0, const float *in1, float *out, int size); +int ElementOptSub(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptSubRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptSubRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); +int ElementOptSubInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_SUB_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/topk_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/topk_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/topk_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/topk_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/topk_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/topk_fp32.h new file mode 100644 index 0000000000..82a03ac8e7 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/topk_fp32.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_TOPK_H_ +#define MINDSPORE_NNACL_TOPK_H_ + +#include "nnacl/op_base.h" + +typedef struct TopkNode { + float element; + int32_t index; +} TopkNode; + +typedef struct TopkParameter { + // primitive parameter + OpParameter op_parameter_; + int k_; + bool sorted_; + + // other parameter + int last_dim_size_; + int loop_num_; + void *topk_node_list_; +} TopkParameter; + +#ifdef __cplusplus +extern "C" { +#endif +void Topk(float *input_data, float *output_data, int32_t *output_index, TopkParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_TOPK_H_ diff --git a/mindspore/lite/nnacl/fp32/transpose_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/transpose_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/transpose_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/transpose_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/transpose_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/transpose_fp32.h new file mode 100644 index 0000000000..6cef303c10 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/transpose_fp32.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_TRANSPOSE_H_ +#define MINDSPORE_NNACL_FP32_TRANSPOSE_H_ + +#include +#include "nnacl/transpose.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DoTransposeFp32(const float *in_data, float *out_data, const int *output_shape, TransposeParameter *param); +void TransposeDimsFp32(const float *in_data, float *out_data, const int *output_shape, int *size, int *position, + TransposeParameter *transpose_param, int task_id, int thread_num); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_TRANSPOSE_H_ diff --git a/mindspore/lite/nnacl/fp32/unique_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/unique_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/unique_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/unique_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/unique_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/unique_fp32.h new file mode 100644 index 0000000000..2051048c74 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/unique_fp32.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_UNIQUE_H +#define MINDSPORE_NNACL_UNIQUE_H + +#include "nnacl/op_base.h" + +typedef struct UniqueParameter { + // primitive parameter + OpParameter op_parameter_; +} UniqueParameter; + +#ifdef __cplusplus +extern "C" { +#endif +void Unique(const float *input, int input_len, float *output0, int *output0_len, int *output1); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_UNIQUE_H diff --git a/mindspore/lite/nnacl/fp32/where_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/where_fp32.c similarity index 100% rename from mindspore/lite/nnacl/fp32/where_fp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/where_fp32.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/where_fp32.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/where_fp32.h new file mode 100644 index 0000000000..dbd06d6fdf --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/where_fp32.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_WHERE_Fp32_H_ +#define MINDSPORE_NNACL_FP32_WHERE_Fp32_H_ + +#include "nnacl/op_base.h" +#include "nnacl/where_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void WhereWithTripleInputs(const bool *condition, const float *x, const float *y, float *output, + WhereParameter *where_param_, int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_WHERE_Fp32_H_ diff --git a/mindspore/lite/nnacl/fp32/winograd_transform.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_transform.c similarity index 100% rename from mindspore/lite/nnacl/fp32/winograd_transform.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_transform.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_transform.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_transform.h new file mode 100644 index 0000000000..9bfc99ebcb --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_transform.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_WINOGRAD_TRANSFORM_H_ +#define MINDSPORE_NNACL_WINOGRAD_TRANSFORM_H_ + +#ifdef ENABLE_ARM +#include +#endif +#include +#include "nnacl/pack.h" +#include "nnacl/fp32/winograd_utils.h" + +#ifdef __cplusplus +extern "C" { +#endif +// for fp32 winograd input/output transform +void WinogradInputTransform(const float *input_data, float *trans_input, float *tmp_data, int cal_num, + int out_tile_index, int out_w_block_num, const ConvParameter *conv_param, + InputTransFunc func); + +void WinogradOutputTransform(const float *gemm_out, float *out_data, const float *bias_data, int cal_num, + int out_tile_index, int output_unit_num, const ConvParameter *conv_param, + OutputTransFunc func); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_WINOGRAD_TRANSFORM_H_ diff --git a/mindspore/lite/nnacl/fp32/winograd_utils.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_utils.c similarity index 100% rename from mindspore/lite/nnacl/fp32/winograd_utils.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_utils.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_utils.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_utils.h new file mode 100644 index 0000000000..dd6d90d23e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/winograd_utils.h @@ -0,0 +1,316 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_WINOGRAD_UTILS_H_ +#define MINDSPORE_NNACL_WINOGRAD_UTILS_H_ + +#ifdef ENABLE_ARM +#include +#endif +#include "nnacl/conv_parameter.h" +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +typedef void (*InputTransFunc)(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); + +typedef void (*OutputTransFunc)(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); + +void GeneralInputTransformUnit(const float *src_data, float *dst_data, const float *matrix_b, const float *matrix_bt, + int src_step, int dst_step, int in_unit); + +void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, const float *matrix_a, + const float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit); + +#define Load16Data \ + src[0] = MS_LDQ_F32(src_data + 0 * src_step); \ + src[1] = MS_LDQ_F32(src_data + 1 * src_step); \ + src[2] = MS_LDQ_F32(src_data + 2 * src_step); \ + src[3] = MS_LDQ_F32(src_data + 3 * src_step); \ + src[4] = MS_LDQ_F32(src_data + 4 * src_step); \ + src[5] = MS_LDQ_F32(src_data + 5 * src_step); \ + src[6] = MS_LDQ_F32(src_data + 6 * src_step); \ + src[7] = MS_LDQ_F32(src_data + 7 * src_step); \ + src[8] = MS_LDQ_F32(src_data + 8 * src_step); \ + src[9] = MS_LDQ_F32(src_data + 9 * src_step); \ + src[10] = MS_LDQ_F32(src_data + 10 * src_step); \ + src[11] = MS_LDQ_F32(src_data + 11 * src_step); \ + src[12] = MS_LDQ_F32(src_data + 12 * src_step); \ + src[13] = MS_LDQ_F32(src_data + 13 * src_step); \ + src[14] = MS_LDQ_F32(src_data + 14 * src_step); \ + src[15] = MS_LDQ_F32(src_data + 15 * src_step); + +#define Load36Data \ + src[0] = MS_LDQ_F32(src_data + 0 * src_step); \ + src[1] = MS_LDQ_F32(src_data + 1 * src_step); \ + src[2] = MS_LDQ_F32(src_data + 2 * 
src_step); \ + src[3] = MS_LDQ_F32(src_data + 3 * src_step); \ + src[4] = MS_LDQ_F32(src_data + 4 * src_step); \ + src[5] = MS_LDQ_F32(src_data + 5 * src_step); \ + src[6] = MS_LDQ_F32(src_data + 6 * src_step); \ + src[7] = MS_LDQ_F32(src_data + 7 * src_step); \ + src[8] = MS_LDQ_F32(src_data + 8 * src_step); \ + src[9] = MS_LDQ_F32(src_data + 9 * src_step); \ + src[10] = MS_LDQ_F32(src_data + 10 * src_step); \ + src[11] = MS_LDQ_F32(src_data + 11 * src_step); \ + src[12] = MS_LDQ_F32(src_data + 12 * src_step); \ + src[13] = MS_LDQ_F32(src_data + 13 * src_step); \ + src[14] = MS_LDQ_F32(src_data + 14 * src_step); \ + src[15] = MS_LDQ_F32(src_data + 15 * src_step); \ + src[16] = MS_LDQ_F32(src_data + 16 * src_step); \ + src[17] = MS_LDQ_F32(src_data + 17 * src_step); \ + src[18] = MS_LDQ_F32(src_data + 18 * src_step); \ + src[19] = MS_LDQ_F32(src_data + 19 * src_step); \ + src[20] = MS_LDQ_F32(src_data + 20 * src_step); \ + src[21] = MS_LDQ_F32(src_data + 21 * src_step); \ + src[22] = MS_LDQ_F32(src_data + 22 * src_step); \ + src[23] = MS_LDQ_F32(src_data + 23 * src_step); \ + src[24] = MS_LDQ_F32(src_data + 24 * src_step); \ + src[25] = MS_LDQ_F32(src_data + 25 * src_step); \ + src[26] = MS_LDQ_F32(src_data + 26 * src_step); \ + src[27] = MS_LDQ_F32(src_data + 27 * src_step); \ + src[28] = MS_LDQ_F32(src_data + 28 * src_step); \ + src[29] = MS_LDQ_F32(src_data + 29 * src_step); \ + src[30] = MS_LDQ_F32(src_data + 30 * src_step); \ + src[31] = MS_LDQ_F32(src_data + 31 * src_step); \ + src[32] = MS_LDQ_F32(src_data + 32 * src_step); \ + src[33] = MS_LDQ_F32(src_data + 33 * src_step); \ + src[34] = MS_LDQ_F32(src_data + 34 * src_step); \ + src[35] = MS_LDQ_F32(src_data + 35 * src_step); + +#define Load64Data \ + src[0] = MS_LDQ_F32(src_data + 0 * src_step); \ + src[1] = MS_LDQ_F32(src_data + 1 * src_step); \ + src[2] = MS_LDQ_F32(src_data + 2 * src_step); \ + src[3] = MS_LDQ_F32(src_data + 3 * src_step); \ + src[4] = MS_LDQ_F32(src_data + 4 * src_step); \ + src[5] = 
MS_LDQ_F32(src_data + 5 * src_step); \ + src[6] = MS_LDQ_F32(src_data + 6 * src_step); \ + src[7] = MS_LDQ_F32(src_data + 7 * src_step); \ + src[8] = MS_LDQ_F32(src_data + 8 * src_step); \ + src[9] = MS_LDQ_F32(src_data + 9 * src_step); \ + src[10] = MS_LDQ_F32(src_data + 10 * src_step); \ + src[11] = MS_LDQ_F32(src_data + 11 * src_step); \ + src[12] = MS_LDQ_F32(src_data + 12 * src_step); \ + src[13] = MS_LDQ_F32(src_data + 13 * src_step); \ + src[14] = MS_LDQ_F32(src_data + 14 * src_step); \ + src[15] = MS_LDQ_F32(src_data + 15 * src_step); \ + src[16] = MS_LDQ_F32(src_data + 16 * src_step); \ + src[17] = MS_LDQ_F32(src_data + 17 * src_step); \ + src[18] = MS_LDQ_F32(src_data + 18 * src_step); \ + src[19] = MS_LDQ_F32(src_data + 19 * src_step); \ + src[20] = MS_LDQ_F32(src_data + 20 * src_step); \ + src[21] = MS_LDQ_F32(src_data + 21 * src_step); \ + src[22] = MS_LDQ_F32(src_data + 22 * src_step); \ + src[23] = MS_LDQ_F32(src_data + 23 * src_step); \ + src[24] = MS_LDQ_F32(src_data + 24 * src_step); \ + src[25] = MS_LDQ_F32(src_data + 25 * src_step); \ + src[26] = MS_LDQ_F32(src_data + 26 * src_step); \ + src[27] = MS_LDQ_F32(src_data + 27 * src_step); \ + src[28] = MS_LDQ_F32(src_data + 28 * src_step); \ + src[29] = MS_LDQ_F32(src_data + 29 * src_step); \ + src[30] = MS_LDQ_F32(src_data + 30 * src_step); \ + src[31] = MS_LDQ_F32(src_data + 31 * src_step); \ + src[32] = MS_LDQ_F32(src_data + 32 * src_step); \ + src[33] = MS_LDQ_F32(src_data + 33 * src_step); \ + src[34] = MS_LDQ_F32(src_data + 34 * src_step); \ + src[35] = MS_LDQ_F32(src_data + 35 * src_step); \ + src[36] = MS_LDQ_F32(src_data + 36 * src_step); \ + src[37] = MS_LDQ_F32(src_data + 37 * src_step); \ + src[38] = MS_LDQ_F32(src_data + 38 * src_step); \ + src[39] = MS_LDQ_F32(src_data + 39 * src_step); \ + src[40] = MS_LDQ_F32(src_data + 40 * src_step); \ + src[41] = MS_LDQ_F32(src_data + 41 * src_step); \ + src[42] = MS_LDQ_F32(src_data + 42 * src_step); \ + src[43] = MS_LDQ_F32(src_data + 43 * 
src_step); \ + src[44] = MS_LDQ_F32(src_data + 44 * src_step); \ + src[45] = MS_LDQ_F32(src_data + 45 * src_step); \ + src[46] = MS_LDQ_F32(src_data + 46 * src_step); \ + src[47] = MS_LDQ_F32(src_data + 47 * src_step); \ + src[48] = MS_LDQ_F32(src_data + 48 * src_step); \ + src[49] = MS_LDQ_F32(src_data + 49 * src_step); \ + src[50] = MS_LDQ_F32(src_data + 50 * src_step); \ + src[51] = MS_LDQ_F32(src_data + 51 * src_step); \ + src[52] = MS_LDQ_F32(src_data + 52 * src_step); \ + src[53] = MS_LDQ_F32(src_data + 53 * src_step); \ + src[54] = MS_LDQ_F32(src_data + 54 * src_step); \ + src[55] = MS_LDQ_F32(src_data + 55 * src_step); \ + src[56] = MS_LDQ_F32(src_data + 56 * src_step); \ + src[57] = MS_LDQ_F32(src_data + 57 * src_step); \ + src[58] = MS_LDQ_F32(src_data + 58 * src_step); \ + src[59] = MS_LDQ_F32(src_data + 59 * src_step); \ + src[60] = MS_LDQ_F32(src_data + 60 * src_step); \ + src[61] = MS_LDQ_F32(src_data + 61 * src_step); \ + src[62] = MS_LDQ_F32(src_data + 62 * src_step); \ + src[63] = MS_LDQ_F32(src_data + 63 * src_step); + +InputTransFunc GetInputTransFunc(int input_unit); + +void InputTransform4x4Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); + +void InputTransform6x6Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); + +void InputTransform8x8Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); + +OutputTransFunc GetOutputTransFunc(int input_unit, int output_unit, ActType act_type); + +#define Store4Data \ + MS_STQ_F32(dst_data, m[0]); \ + MS_STQ_F32(dst_data + out_c, m[1]); \ + MS_STQ_F32(dst_data + dst_step * out_c, m[2]); \ + MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[3]); + +#define Store9Data \ + MS_STQ_F32(dst_data, m[0]); \ + MS_STQ_F32(dst_data + out_c, m[1]); \ + MS_STQ_F32(dst_data + 2 * out_c, m[2]); \ + MS_STQ_F32(dst_data + dst_step * out_c, m[3]); \ + MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[4]); \ + 
MS_STQ_F32(dst_data + dst_step * out_c + 2 * out_c, m[5]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c, m[6]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + out_c, m[7]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 2 * out_c, m[8]); + +#define Store16Data \ + MS_STQ_F32(dst_data, m[0]); \ + MS_STQ_F32(dst_data + out_c, m[1]); \ + MS_STQ_F32(dst_data + 2 * out_c, m[2]); \ + MS_STQ_F32(dst_data + 3 * out_c, m[3]); \ + MS_STQ_F32(dst_data + dst_step * out_c, m[4]); \ + MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[5]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 2 * out_c, m[6]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 3 * out_c, m[7]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c, m[8]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + out_c, m[9]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 2 * out_c, m[10]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 3 * out_c, m[11]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c, m[12]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + out_c, m[13]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 2 * out_c, m[14]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 3 * out_c, m[15]); + +#define Store25Data \ + MS_STQ_F32(dst_data, m[0]); \ + MS_STQ_F32(dst_data + out_c, m[1]); \ + MS_STQ_F32(dst_data + 2 * out_c, m[2]); \ + MS_STQ_F32(dst_data + 3 * out_c, m[3]); \ + MS_STQ_F32(dst_data + 4 * out_c, m[4]); \ + MS_STQ_F32(dst_data + dst_step * out_c, m[5]); \ + MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[6]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 2 * out_c, m[7]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 3 * out_c, m[8]); \ + MS_STQ_F32(dst_data + dst_step * out_c + 4 * out_c, m[9]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c, m[10]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + out_c, m[11]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 2 * out_c, m[12]); \ + MS_STQ_F32(dst_data + 2 * dst_step * out_c + 3 * out_c, m[13]); \ + MS_STQ_F32(dst_data + 2 * dst_step * 
out_c + 4 * out_c, m[14]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c, m[15]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + out_c, m[16]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 2 * out_c, m[17]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 3 * out_c, m[18]); \ + MS_STQ_F32(dst_data + 3 * dst_step * out_c + 4 * out_c, m[19]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c, m[20]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c + out_c, m[21]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c + 2 * out_c, m[22]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c + 3 * out_c, m[23]); \ + MS_STQ_F32(dst_data + 4 * dst_step * out_c + 4 * out_c, m[24]); + +void OutputTransform4x2Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x2ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x2Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x3Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x3ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform4x3Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); + +void OutputTransform6x2Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x2ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); 
+void OutputTransform6x2Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x3Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x3ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x3Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x4Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x4ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x4Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x5Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x5ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform6x5Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); + +void OutputTransform8x2Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x2ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void 
OutputTransform8x2Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x3Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x3ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x3Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x4Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x4ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x4Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x5Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x5ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x5Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x6ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void 
OutputTransform8x6Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x7Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, + int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x7ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); +void OutputTransform8x7Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, + int dst_step, int out_c, int r_w, int r_h, int r_c); + +int SelectOutputUnit(ConvParameter *conv_param); + +bool CheckIfUseWinograd(int *output_unit, ConvParameter *conv_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_WINOGRAD_UTILS_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/activation_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/activation_grad.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/activation_grad.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/activation_grad.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/activation_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/activation_grad.h new file mode 100644 index 0000000000..8317571386 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/activation_grad.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_GRAD_ACTIVATION_GRAD_H_ +#define MINDSPORE_NNACL_FP32_GRAD_ACTIVATION_GRAD_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/fp32/arithmetic_fp32.h" +#include "nnacl/errorcode.h" + +typedef struct ActivationGradParameter { + OpParameter op_parameter; + int type_; + float alpha_; +} ActivationGradParameter; +#ifdef __cplusplus +extern "C" { +#endif + +int ReluGrad(float *src0, float *src1, size_t length, float *dst); +int Relu6Grad(float *src0, float *src1, size_t length, float *dst); +int LReluGrad(float *src0, float *src1, size_t length, float *dst, float alpha); +int SigmoidGrad(float *src0, float *src1, size_t length, float *dst); +int TanhGrad(float *src0, float *src1, size_t length, float *dst); +int HSwishGrad(float *src0, float *src1, size_t length, float *dst); +int HSigmoidGrad(float *src0, float *src1, size_t length, float *dst); +int EluGrad(float *src0, float *src1, size_t length, float *dst, float alpha); +int GeluGrad(float *src0, float *src1, size_t length, float *dst); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_GRAD_ACTIVATION_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/arithmetic_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/arithmetic_grad.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/arithmetic_grad.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/arithmetic_grad.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/arithmetic_grad.h 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/arithmetic_grad.h new file mode 100644 index 0000000000..2cbf041e9d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/arithmetic_grad.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_GRAD_ARITHMETIC_GRAD_H_ +#define MINDSPORE_NNACL_FP32_GRAD_ARITHMETIC_GRAD_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void ElementDivNegSquare(const float *nom, const float *denom, float *output, int element_size); +void ElementMulAndDivNegSquare(const float *a, const float *b, const float *denom, float *output, int element_size); +int ElementAbsGrad(const float *in1, const float *in2, float *out, int element_size); +void MaximumByAxes(const float *input0, const float *input1, const float *dy, const int *input0_dims, + const int *input1_dims, const int *dy_dims, float *output0, float *output1, int num_dims); +void MinimumByAxes(const float *input0, const float *input1, const float *dy, const int *input0_dims, + const int *input1_dims, const int *dy_dims, float *output0, float *output1, int num_dims); +int ElementSqrtGrad(const float *in1, const float *in2, float *out, const int element_size); +int ElementRsqrtGrad(const float *in1, const float *in2, float *out, const int element_size); + +#ifdef __cplusplus +} +#endif + +#endif // 
MINDSPORE_NNACL_FP32_GRAD_ARITHMETIC_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/batch_norm.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/batch_norm.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/batch_norm.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/batch_norm.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/batch_norm.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/batch_norm.h new file mode 100644 index 0000000000..c701e02308 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/batch_norm.h @@ -0,0 +1,42 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_GRAD_BATCH_NORM_H_ +#define MINDSPORE_NNACL_FP32_GRAD_BATCH_NORM_H_ + +#include "nnacl/op_base.h" + +typedef struct BNGradParameter { + OpParameter op_parameter_; + float epsilon_; +} BNGradParameter; + +#ifdef __cplusplus +extern "C" { +#endif + +void var2Invar(float *save_var, int size, float eps); +void backwardAll(const float *in, const float *yt, const float *mean, const float *invar, const float *scale, int size, + int ch, float *dxhat_sum, float *dxhathat_sum, float *dbias, float *dscale, float *dx); +void backwardP1(const float *in, const float *yt, const float *mean, const float *invar, const float *scale, int size, + int ch, float *dxhat_sum, float *dxhathat_sum, float *dbias, float *dscale); +void backwardP2(const float *in, const float *yt, const float *mean, const float *invar, const float *scale, int size, + int total_size, int ch, const float *dxhat_sum, const float *dxhathat_sum, float *dx); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_GRAD_BATCH_NORM_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/binary_cross_entropy.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/binary_cross_entropy.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy.h new file mode 100644 index 0000000000..ce3581f890 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_BINARY_CROSS_ENTROPY_H_ +#define MINDSPORE_NNACL_BINARY_CROSS_ENTROPY_H_ + +#include "nnacl/op_base.h" + +typedef struct BinaryCrossEntropyParameter { + OpParameter op_parameter_; + int reduction; +} BinaryCrossEntropyParameter; + +#ifdef __cplusplus +extern "C" { +#endif + +void BinaryCrossEntropy(const int input_size, const int reduction, const float *input_x, const float *input_y, + const float *weight, float *loss, float *tmp_loss); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_BINARY_CROSS_ENTROPY_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/binary_cross_entropy_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy_grad.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/binary_cross_entropy_grad.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy_grad.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy_grad.h new file mode 100644 index 0000000000..57a7785a14 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/binary_cross_entropy_grad.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_BINARY_CROSS_ENTROPY_GRAD_H_ +#define MINDSPORE_NNACL_BINARY_CROSS_ENTROPY_GRAD_H_ + +#include "nnacl/op_base.h" + +typedef struct BinaryCrossEntropyGradParameter { + OpParameter op_parameter_; + int reduction; +} BinaryCrossEntropyGradParameter; + +#ifdef __cplusplus +extern "C" { +#endif + +int BinaryCrossEntropyGrad(const int input_size, const int reduction, const float *input_x, const float *input_y, + const float *weight, const float *dloss, float *dx); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_BINARY_CROSS_ENTROPY_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/convolution_grad_filter.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/convolution_grad_filter.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/convolution_grad_filter.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/convolution_grad_filter.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/convolution_grad_filter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/convolution_grad_filter.h new file mode 100644 index 0000000000..e4c0342ec9 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/convolution_grad_filter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_NNACL_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_
+#define MINDSPORE_NNACL_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_
+
+#include <stddef.h>
+#include "nnacl/conv_parameter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int ConvDwFilterGrad(const float *x, const float *dy, float *dw, int start, int count, const ConvParameter *conv_param);
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_NNACL_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_
diff --git a/mindspore/lite/nnacl/fp32_grad/dropout_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_grad.c
similarity index 100%
rename from mindspore/lite/nnacl/fp32_grad/dropout_grad.c
rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_grad.c
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_grad.h
new file mode 100644
index 0000000000..82928c7cbf
--- /dev/null
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_grad.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_GRAD_DROPOUT_GRAD_H_ +#define MINDSPORE_NNACL_FP32_GRAD_DROPOUT_GRAD_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void DropoutGrad(const float *yt_ptr, const float *mask, float *output_ptr, int length, float ratio); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_GRAD_DROPOUT_GRAD_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_parameter.h new file mode 100644 index 0000000000..29b988c4c8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/dropout_parameter.h @@ -0,0 +1,27 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_GRAD_DROPOUT_PARAMETER_H_ +#define MINDSPORE_NNACL_FP32_GRAD_DROPOUT_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct DropoutParameter { + OpParameter op_parameter_; + float ratio_; +} DropoutParameter; + +#endif // MINDSPORE_NNACL_FP32_GRAD_DROPOUT_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/gemm.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/gemm.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/gemm.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/gemm.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/gemm.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/gemm.h new file mode 100644 index 0000000000..f557609479 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/gemm.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_NNACL_FP32_GRAD_GEMM_H_
+#define MINDSPORE_NNACL_FP32_GRAD_GEMM_H_
+
+#include <stdlib.h>
+#include "nnacl/op_base.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+typedef struct {
+  int ca;
+  int cb;
+  ActType atype;
+  float *bias;
+  float *mat_a;
+  float *mat_b;
+} GemmCb;
+
+void GemmMatmulPlus(int ta, int tb, int M, int N, int K, float alpha, const float *mat_a, int lda, const float *mat_b,
+                    int ldb, float beta, float *mat_c, int ldc, float *workspace, GemmCb *cb);
+void GemmMatmul(int ta, int tb, int M, int N, int K, float alpha, const float *mat_a, int lda, const float *mat_b,
+                int ldb, float beta, float *mat_c, int ldc, float *workspace);
+int MatSize(int row, int col, int round);
+int MatSizeTotal(int row, int col, int deep, int inc);
+void AddMatrix(const float *v1, float *v2, float beta, int row, int col, int stride);
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_NNACL_FP32_GRAD_GEMM_H_
diff --git a/mindspore/lite/nnacl/fp32_grad/layernorm_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernorm_grad.c
similarity index 100%
rename from mindspore/lite/nnacl/fp32_grad/layernorm_grad.c
rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernorm_grad.c
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernorm_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernorm_grad.h
new file mode 100644
index 0000000000..ab9122c5c9
--- /dev/null
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernorm_grad.h
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_GRAD_LAYERNORM_GRAD_H_ +#define MINDSPORE_NNACL_FP32_GRAD_LAYERNORM_GRAD_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +void LayerNormGrad(const float *x, const float *dy, const float *var, const float *mean, const float *gamma, + int param_num, int param_size, int block_num, int block_size, float *dx, float *dg, float *db); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FP32_GRAD_LAYERNORM_GRAD_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernormgrad_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernormgrad_parameter.h new file mode 100644 index 0000000000..c783ffd6c8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/layernormgrad_parameter.h @@ -0,0 +1,27 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_GRAD_LAYERNORMGRAD_PARAMETER_H_ +#define MINDSPORE_NNACL_FP32_GRAD_LAYERNORMGRAD_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct LayerNormGradParameter { + OpParameter op_parameter_; + int begin_norm_axis_; + int begin_params_axis_; +} LayerNormGradParameter; + +#endif // MINDSPORE_NNACL_FP32_GRAD_LAYERNORMGRAD_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/optimizer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/optimizer.h new file mode 100644 index 0000000000..91659d6007 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/optimizer.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_GRAD_OPTIMIZER_H_ +#define MINDSPORE_NNACL_FP32_GRAD_OPTIMIZER_H_ + +#include "nnacl/op_base.h" + +typedef struct ApplyMomentumParameter { + OpParameter op_parameter_; + bool use_nesterov_; + float grad_scale_; +} ApplyMomentumParameter; + +typedef struct SgdParameter { + OpParameter op_parameter_; + float dampening_; + bool use_nesterov_; + float weight_decay_; +} SgdParameter; + +typedef struct AdamParameter { + OpParameter op_parameter_; + bool use_nesterov_; +} AdamParameter; + +#endif // MINDSPORE_NNACL_FP32_GRAD_OPTIMIZER_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/pack_ext.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pack_ext.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/pack_ext.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pack_ext.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pack_ext.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pack_ext.h new file mode 100644 index 0000000000..f040fdc580 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pack_ext.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MINDSPORE_NNACL_FP32_GRAD_PACK_EXT_H_
+#define MINDSPORE_NNACL_FP32_GRAD_PACK_EXT_H_
+
+#include <stddef.h>
+#include "nnacl/conv_parameter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void RollingIm2ColPackUnitFp32(const float *input_data, const ConvParameter *conv_param, float *packed_input,
+                               int real_cal_num, int block_index);
+void RollingIm2ColPackDwUnitFp32(const float *input_data, const ConvParameter *conv_param, float *packed_input,
+                                 int real_cal_num, int block_index);
+
+void rolling_im2col_hwc(const float *in_data, float *data_col, const ConvParameter *conv_param, int rows, int start);
+void rolling_im2row_hwc(const float *in_data, float *data_row, const ConvParameter *conv_param, int rows, int start);
+void rolling_col2im_hwc(const float *data_col, float *data_im, const ConvParameter *conv_param, int rows, int start);
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_NNACL_FP32_GRAD_PACK_EXT_H_
diff --git a/mindspore/lite/nnacl/fp32_grad/pooling_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pooling_grad.c
similarity index 100%
rename from mindspore/lite/nnacl/fp32_grad/pooling_grad.c
rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pooling_grad.c
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pooling_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pooling_grad.h
new file mode 100644
index 0000000000..b794b9b97a
--- /dev/null
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/pooling_grad.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_GRAD_POOLING_GRAD_H_ +#define MINDSPORE_NNACL_FP32_GRAD_POOLING_GRAD_H_ + +#include "nnacl/fp32/pooling_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif +void AvgPoolingGrad(const float *input_ptr, float *output_ptr, int count, PoolingParameter *pooling_param); +void MaxPoolingGrad(const float *input_ptr, const float *dy_ptr, float *output_ptr, int output_batch, + PoolingParameter *pooling_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_GRAD_POOLING_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/reduce_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/reduce_grad.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/reduce_grad.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/reduce_grad.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/reduce_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/reduce_grad.h new file mode 100644 index 0000000000..f004378380 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/reduce_grad.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_NNACL_FP32_GRAD_REDUCE_GRAD_H_
+#define MINDSPORE_NNACL_FP32_GRAD_REDUCE_GRAD_H_
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+float ReduceMeanAll(const float *src, int size);
+void ReduceSumByAxes(const float *input, const int *input_dims, float *output, const int *output_dims, int num_dims);
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_NNACL_FP32_GRAD_REDUCE_GRAD_H_
diff --git a/mindspore/lite/nnacl/fp32_grad/resize_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.c
similarity index 100%
rename from mindspore/lite/nnacl/fp32_grad/resize_grad.c
rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.c
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.h
new file mode 100644
index 0000000000..e2411ec6ef
--- /dev/null
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_FP32_GRAD_RESIZE_GRAD_H_ +#define MINDSPORE_NNACL_FP32_GRAD_RESIZE_GRAD_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ResizeGradParameter { + OpParameter op_parameter_; + bool align_corners_; + int method; + size_t in_height_; + size_t in_width_; + size_t out_height_; + size_t out_width_; + float height_scale_; + float width_scale_; +} ResizeGradParameter; + +void ResizeNearestNeighborGrad(float *in_addr, float *out_addr, int batch_size, int channel, + ResizeGradParameter *param); +void ResizeBiLinearGrad(float *in_addr, float *out_addr, int batch_size, int channel, ResizeGradParameter *param); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FP32_GRAD_RESIZE_GRAD_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/smooth_l1_loss.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/smooth_l1_loss.h new file mode 100644 index 0000000000..d7c340710e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/smooth_l1_loss.h @@ -0,0 +1,27 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_SMOOTH_L1_LOSS_PARAMETER_H_ +#define MINDSPORE_NNACL_FP32_SMOOTH_L1_LOSS_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SmoothL1LossParameter { + OpParameter op_parameter_; + float beta_; +} SmoothL1LossParameter; + +#endif // MINDSPORE_NNACL_FP32_SMOOTH_L1_LOSS_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/softmax_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/softmax_grad.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/softmax_grad.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/softmax_grad.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/softmax_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/softmax_grad.h new file mode 100644 index 0000000000..4c2ae3d5c2 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/softmax_grad.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_GRAD_SOFTMAX_GRAD_H_ +#define MINDSPORE_NNACL_FP32_GRAD_SOFTMAX_GRAD_H_ + +#include "nnacl/op_base.h" +#include "nnacl/fp32/softmax_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct SoftmaxCrossEntropyParameter { + // primitive parameter + OpParameter op_parameter_; + int n_dim_; + + // shape correlative + int input_shape_[5]; + + // other parameter + int32_t batch_size_; + unsigned int number_of_classes_; + bool is_grad_; +} SoftmaxCrossEntropyParameter; + +void SoftmaxGrad(const float *input_ptr, const float *yt_ptr, float *output_ptr, float *sum_data, float *sum_mul, + SoftmaxParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_GRAD_SOFTMAX_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/strided_slice_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/strided_slice_grad.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/strided_slice_grad.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/strided_slice_grad.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/strided_slice_grad.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/strided_slice_grad.h new file mode 100644 index 0000000000..75fe2cf53d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/strided_slice_grad.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_FP32_GRAD_STRIDED_SLICE_GRAD_H_ +#define MINDSPORE_NNACL_FP32_GRAD_STRIDED_SLICE_GRAD_H_ + +#include "nnacl/op_base.h" +#include "nnacl/strided_slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DoStridedSliceGrad(const float *inputs, float *output, const int *dx_shape, StridedSliceParameter *param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_GRAD_STRIDED_SLICE_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/unsorted_segment_sum.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/unsorted_segment_sum.c similarity index 100% rename from mindspore/lite/nnacl/fp32_grad/unsorted_segment_sum.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/unsorted_segment_sum.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/unsorted_segment_sum.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/unsorted_segment_sum.h new file mode 100644 index 0000000000..c2ea9e03fc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/unsorted_segment_sum.h @@ -0,0 +1,29 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_FP32_GRAD_UNSORTED_SEGMENT_SUM_H_ +#define MINDSPORE_NNACL_FP32_GRAD_UNSORTED_SEGMENT_SUM_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +int UnsortedSegmentSum(const float *input, int unit_num, int input_dim1, const int *indices, float *output, + int output_dim0, int output_dim1); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FP32_GRAD_UNSORTED_SEGMENT_SUM_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/utils.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/utils.h new file mode 100644 index 0000000000..618e1bc6fc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/utils.h @@ -0,0 +1,72 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FP32_GRAD_UTILS_H_ +#define MINDSPORE_NNACL_FP32_GRAD_UTILS_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static inline size_t GetInputOffset(int num_dims, const int *dims, const int *iter) { + size_t offset = 0; + for (int idx = 0; idx < num_dims; ++idx) { + offset = offset * (size_t)(dims[idx]) + (size_t)(iter[idx]); + } + + return offset; +} + +static inline size_t GetOutputOffset(int num_dims, const int *dims, const int *iter, int num_axis, const int *axes) { + size_t offset = 0; + for (int idx = 0; idx < num_dims; ++idx) { + // if we need to skip this axis + int is_axis = 0; + for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) { + if (idx == axes[axis_idx]) { + is_axis = 1; + break; + } + } + + if (is_axis == 0) { + offset = offset * (size_t)(dims[idx]) + (size_t)(iter[idx]); + } + } + return offset; +} + +static inline int NextIndex(int num_dims, const int *dims, int *current) { + int carry = 1; + for (int idx = num_dims - 1; idx >= 0; --idx) { + int current_val = current[idx] + carry; + if (dims[idx] == current_val) { + current[idx] = 0; + } else { + current[idx] = current_val; + carry = 0; + break; + } + } + return (carry == 0); +} + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_FP32_GRAD_UTILS_H_ diff --git a/mindspore/lite/nnacl/gather_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gather_parameter.h similarity index 100% rename from mindspore/lite/nnacl/gather_parameter.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gather_parameter.h diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gelu_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gelu_parameter.h new file mode 100644 index 0000000000..932b36a7f1 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gelu_parameter.h @@ -0,0 +1,28 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 
(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_GELU_PARAMETER_H_ +#define MINDSPORE_NNACL_GELU_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct GeLUParameter { + // Primitive parameter + OpParameter op_parameter_; + bool approximate_; +} GeLUParameter; + +#endif // MINDSPORE_NNACL_GELU_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gru_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gru_parameter.h new file mode 100644 index 0000000000..8f71f43d22 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/gru_parameter.h @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_GRU_PARAMETER_H_ +#define MINDSPORE_NNACL_GRU_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct GruParameter { + // Primitive parameter + OpParameter op_parameter_; + // shape correlative + int input_size_; + int hidden_size_; // output_size + int seq_len_; + int batch_; + // other parameter + int output_step_; + bool bidirectional_; + int input_row_align_; + int input_col_align_; + int state_row_align_; + int state_col_align_; +} GruParameter; + +#endif // MINDSPORE_NNACL_GRU_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/infer/adam_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/adam_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/adam_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/adam_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/adam_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/adam_infer.h new file mode 100644 index 0000000000..ab6e4f0c95 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/adam_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ADAM_INFER_H +#define MINDSPORE_NNACL_ADAM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AdamInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ADAM_INFER_H diff --git a/mindspore/lite/nnacl/infer/add_sub_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/add_sub_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/add_sub_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/add_sub_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/add_sub_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/add_sub_grad_infer.h new file mode 100644 index 0000000000..870cb1db0f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/add_sub_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ADD_SUB_GRAD_INFER_H +#define MINDSPORE_NNACL_ADD_SUB_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AddSubGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ADD_SUB_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/addn_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/addn_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/addn_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/addn_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/addn_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/addn_infer.h new file mode 100644 index 0000000000..a0c889c600 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/addn_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ADDN_INFER_H +#define MINDSPORE_NNACL_ADDN_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AddnInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ADDN_INFER_H diff --git a/mindspore/lite/nnacl/infer/apply_momentum_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/apply_momentum_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/apply_momentum_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/apply_momentum_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/apply_momentum_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/apply_momentum_infer.h new file mode 100644 index 0000000000..f7460f0b3b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/apply_momentum_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_APPLY_MOMENTUM_INFER_H +#define MINDSPORE_NNACL_APPLY_MOMENTUM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ApplyMomentumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_APPLY_MOMENTUM_INFER_H diff --git a/mindspore/lite/nnacl/infer/argmin_max_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/argmin_max_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/argmin_max_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/argmin_max_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/argmin_max_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/argmin_max_infer.h new file mode 100644 index 0000000000..2febbc7850 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/argmin_max_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ARGMAX_INFER_H +#define MINDSPORE_NNACL_ARGMAX_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/arg_min_max_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ArgMinMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ARGMAX_INFER_H diff --git a/mindspore/lite/nnacl/infer/arithmetic_compare_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_compare_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/arithmetic_compare_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_compare_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_compare_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_compare_infer.h new file mode 100644 index 0000000000..5513fb1219 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_compare_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ARITHMETIC_COMPARE_INFER_H +#define MINDSPORE_NNACL_ARITHMETIC_COMPARE_INFER_H + +#include "nnacl/infer/arithmetic_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ArithmeticCompareInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ARITHMETIC_COMPARE_INFER_H diff --git a/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/arithmetic_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.h new file mode 100644 index 0000000000..bedf1dabb2 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INFER_ARITHMETIC_GRAD_INFER_H_ +#define MINDSPORE_NNACL_INFER_ARITHMETIC_GRAD_INFER_H_ + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INFER_ARITHMETIC_GRAD_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/arithmetic_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/arithmetic_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_infer.h new file mode 100644 index 0000000000..c6b06bef69 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ARITHMETIC_INFER_H +#define MINDSPORE_NNACL_ARITHMETIC_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/arithmetic.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ArithmeticInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif  // MINDSPORE_NNACL_ARITHMETIC_INFER_H diff --git a/mindspore/lite/nnacl/infer/assert_op_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assert_op_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/assert_op_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assert_op_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assert_op_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assert_op_infer.h new file mode 100644 index 0000000000..5f0156e95d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assert_op_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ASSERT_OP_INFER_H +#define MINDSPORE_NNACL_ASSERT_OP_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AssertOpInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ASSERT_OP_INFER_H diff --git a/mindspore/lite/nnacl/infer/assign_add_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_add_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/assign_add_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_add_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_add_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_add_infer.h new file mode 100644 index 0000000000..1f7cea8aba --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_add_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ASSIGN_ADD_INFER_H +#define MINDSPORE_NNACL_ASSIGN_ADD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AssignAddInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ASSIGN_ADD_INFER_H diff --git a/mindspore/lite/nnacl/infer/assign_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/assign_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_infer.h new file mode 100644 index 0000000000..1de69e2185 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/assign_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ASSIGN_INFER_H +#define MINDSPORE_NNACL_ASSIGN_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AssignInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ASSIGN_INFER_H diff --git a/mindspore/lite/nnacl/infer/audio_spectrogram_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/audio_spectrogram_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/audio_spectrogram_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/audio_spectrogram_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/audio_spectrogram_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/audio_spectrogram_infer.h new file mode 100644 index 0000000000..af5daa018b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/audio_spectrogram_infer.h @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_AUDIO_SPECTROGRAM_INFER_H +#define MINDSPORE_NNACL_AUDIO_SPECTROGRAM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct AudioSpectrogramParameter { + OpParameter op_parameter_; + int window_size_; + int stride_; +} AudioSpectrogramParameter; + +int AudioSpectrogramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_AUDIO_SPECTROGRAM_INFER_H diff --git a/mindspore/lite/nnacl/infer/batch_to_space_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/batch_to_space_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/batch_to_space_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/batch_to_space_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/batch_to_space_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/batch_to_space_infer.h new file mode 100644 index 0000000000..dd1a64783c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/batch_to_space_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_BATCH_TO_SPACE_INFER_H +#define MINDSPORE_NNACL_BATCH_TO_SPACE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/batch_to_space.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int BatchToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_BATCH_TO_SPACE_INFER_H diff --git a/mindspore/lite/nnacl/infer/bias_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/bias_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_grad_infer.h new file mode 100644 index 0000000000..3d87f516c4 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bias_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_BIAS_GRAD_INFER_H +#define MINDSPORE_NNACL_BIAS_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int BiasGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_BIAS_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/binary_cross_entropy_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/binary_cross_entropy_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/binary_cross_entropy_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/binary_cross_entropy_infer.h new file mode 100644 index 0000000000..6f4657c643 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/binary_cross_entropy_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_BINARY_CROSS_ENTROPY_INFER_H +#define MINDSPORE_NNACL_BINARY_CROSS_ENTROPY_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32_grad/binary_cross_entropy.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int BinaryCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_BINARY_CROSS_ENTROPY_INFER_H diff --git a/mindspore/lite/nnacl/infer/bn_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bn_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/bn_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bn_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bn_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bn_grad_infer.h new file mode 100644 index 0000000000..5968586cb4 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/bn_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_BN_GRAD_INFER_H +#define MINDSPORE_NNACL_BN_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int BnGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_BN_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/broadcast_to_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/broadcast_to_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/broadcast_to_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/broadcast_to_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/broadcast_to_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/broadcast_to_infer.h new file mode 100644 index 0000000000..90d818efea --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/broadcast_to_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_BROADCAST_TO_INFER_H +#define MINDSPORE_NNACL_BROADCAST_TO_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/broadcast_to_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int BroadcastToInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif  // MINDSPORE_NNACL_BROADCAST_TO_INFER_H diff --git a/mindspore/lite/nnacl/infer/cast_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/cast_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/cast_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/cast_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/cast_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/cast_infer.h new file mode 100644 index 0000000000..a34e541acc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/cast_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CAST_INFER_H +#define MINDSPORE_NNACL_CAST_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int CastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CAST_INFER_H diff --git a/mindspore/lite/nnacl/infer/common_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/common_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h new file mode 100644 index 0000000000..5a024f43b0 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h @@ -0,0 +1,212 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_COMMON_H_ +#define MINDSPORE_NNACL_COMMON_H_ + +#include <stddef.h> +#include "nnacl/errorcode.h" +#include "nnacl/op_base.h" +#include "nnacl/tensor_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define kNCHW_N 0 +#define kNCHW_C 1 +#define kNCHW_H 2 +#define kNCHW_W 3 + +typedef enum FormatC { + Format_NCHW = 0, + Format_NHWC = 1, + Format_NHWC4 = 2, + Format_HWKC = 3, + Format_HWCK = 4, + Format_KCHW = 5, + Format_CKHW = 6, + Format_KHWC = 7, + Format_CHWK = 8, + Format_HW = 9, + Format_HW4 = 10, + Format_NC = 11, + Format_NC4 = 12, + Format_NC4HW4 = 13, + Format_NUM_OF_FORMAT = 14, + Format_MIN = Format_NCHW, + Format_MAX = Format_NUM_OF_FORMAT +} FormatC; + +typedef enum TypeIdC { + kTypeUnknown = 0, + kMetaTypeBegin = kTypeUnknown, + kMetaTypeType, // Type + kMetaTypeAnything, + kMetaTypeObject, + kMetaTypeTypeType, // TypeType + kMetaTypeProblem, + kMetaTypeExternal, + kMetaTypeNone, + kMetaTypeNull, + kMetaTypeEllipsis, + kMetaTypeEnd, + // + // Object types + // + kObjectTypeBegin = kMetaTypeEnd, + kObjectTypeNumber, + kObjectTypeString, + kObjectTypeList, + kObjectTypeTuple, + kObjectTypeSlice, + kObjectTypeKeyword, + kObjectTypeTensorType, + kObjectTypeRowTensorType, + kObjectTypeSparseTensorType, + kObjectTypeUndeterminedType, + kObjectTypeClass, + kObjectTypeDictionary, + kObjectTypeFunction, + kObjectTypeJTagged, + kObjectTypeSymbolicKeyType, + kObjectTypeEnvType, + kObjectTypeRefKey, + kObjectTypeRef, + kObjectTypeEnd, + // + // Number Types + // + kNumberTypeBegin = kObjectTypeEnd, + kNumberTypeBool, + kNumberTypeInt, + kNumberTypeInt8, + kNumberTypeInt16, + kNumberTypeInt32, + kNumberTypeInt64, + kNumberTypeUInt, + kNumberTypeUInt8, + kNumberTypeUInt16, + kNumberTypeUInt32, + kNumberTypeUInt64, + kNumberTypeFloat, + kNumberTypeFloat16, + kNumberTypeFloat32, + kNumberTypeFloat64, + kNumberTypeComplex64, + kNumberTypeEnd +} TypeIdC; + +enum NNACLLshProjectionType { + LshProjectionType_UNKNOWN = 0, + LshProjectionType_SPARSE 
= 1, + LshProjectionType_DENSE = 2, + LshProjectionType_MIN = LshProjectionType_UNKNOWN, + LshProjectionType_MAX = LshProjectionType_DENSE +}; + +enum NNACLQuantType { + QuantType_QUANT_NONE = 0, + QuantType_AwareTraining = 1, + QuantType_WeightQuant = 2, + QuantType_PostTraining = 3, + QuantType_QUANT_WEIGHT = 4, + QuantType_QUANT_ALL = 5, + QuantType_MIN = QuantType_QUANT_NONE, + QuantType_MAX = QuantType_QUANT_ALL +}; + +typedef struct vvector { + int **shape_; // value of shapes + int *shape_size_; // size of shape + size_t size_; // number of shapes +} vvector; + +typedef struct TensorListC { + bool is_ready_; + int data_type_; + int format_; + + int tensors_data_type_; // element_data_type_, keep same as c++ + int max_elements_num_; + int element_shape_[8]; + size_t element_num_; + size_t element_shape_size_; + TensorC *tensors_; +} TensorListC; + +typedef struct VectorC { + int *data_; + size_t size_; + size_t max_size_; + size_t per_malloc_size_; +} VectorC; + +int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, vvector *tensor_shape); +int TensorListMergeShape(int *element_shape, size_t *element_shape_size, const int *tmp, size_t tmp_size); +bool TensorListIsFullyDefined(int *shape, size_t shape_size); + +int GetBatch(const TensorC *tensor); +int GetHeight(const TensorC *tensor); +int GetWidth(const TensorC *tensor); +int GetChannel(const TensorC *tensor); +int GetElementNum(const TensorC *tensor); +int GetDimensionSize(const TensorC *tensor, const size_t index); + +int CheckAugmentNull(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); +int CheckAugmentNullSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter, size_t inputs_size_obj, size_t outputs_size_obj); +int CheckAugmentNullSizeInputTwo(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter, 
size_t inputs_size_obj_0, + size_t inputs_size_obj_1, size_t outputs_size_obj); +int CheckAugmentNullInputSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter, size_t inputs_size_obj); +int CheckAugmentNullOutputSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter, size_t outputs_size_obj); +void SetDataTypeFormat(TensorC *dst, const TensorC *src); + +int SetShapeTensor(TensorC *dst, const TensorC *src); +int SetShapeArray(TensorC *dst, int *src, size_t src_size); +int ShapeSet(int *dst_shape, size_t *dst_shape_size, const int *src_shape, size_t src_shape_size); +int ShapePush(int *shape, size_t *shape_size, int value); +int ShapeInsert(int *shape, size_t *shape_size, int index, int value); +int ShapeErase(int *shape, size_t *shape_size, int index); +bool ShapeEqual(const int *shape0, size_t shape0_size, const int *shape1, size_t shape1_size); + +void iswap(int *a, int *b); + +int imin(int a, int b); +int imax(int a, int b); + +int CommonInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); +int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +int VectorCInit(VectorC *vc, size_t per_malloc_size); +void VectorCSet(VectorC *vc, const int *src_shape, size_t src_shape_size); +void VectorCPush(VectorC *vc, int value); +void VectorCInsert(VectorC *vc, int index, int value); +void VectorCErase(VectorC *vc, int index); +bool VectorCEqual(VectorC *vc1, VectorC *vc2); +void VectorCFree(VectorC *vc); + +#ifdef __cplusplus +} +#endif + +#endif  // MINDSPORE_NNACL_COMMON_H_ diff --git a/mindspore/lite/nnacl/infer/concat_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/concat_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/concat_infer.c rename to
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/concat_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/concat_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/concat_infer.h new file mode 100644 index 0000000000..315d60c8f7 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/concat_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CONCAT_INFER_H +#define MINDSPORE_NNACL_CONCAT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/concat_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ConcatInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CONCAT_INFER_H diff --git a/mindspore/lite/nnacl/infer/constant_of_shape_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/constant_of_shape_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/constant_of_shape_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/constant_of_shape_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/constant_of_shape_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/constant_of_shape_infer.h new file mode 100644 index 0000000000..baceb1201a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/constant_of_shape_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CONSTANT_OF_SHAPE_INFER_H +#define MINDSPORE_NNACL_CONSTANT_OF_SHAPE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/constant_of_shape_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ConstantOfShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CONSTANT_OF_SHAPE_INFER_H diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.h new file mode 100644 index 0000000000..02deb60f64 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CONV2D_GRAD_FILTER_INFER_H +#define MINDSPORE_NNACL_CONV2D_GRAD_FILTER_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int Conv2dGradFilterInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CONV2D_GRAD_FILTER_INFER_H diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.h new file mode 100644 index 0000000000..17ae57a8e5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CONV2D_GRAD_INPUT_INFER_H +#define MINDSPORE_NNACL_CONV2D_GRAD_INPUT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int Conv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CONV2D_GRAD_INPUT_INFER_H diff --git a/mindspore/lite/nnacl/infer/conv2d_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/conv2d_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.h new file mode 100644 index 0000000000..fc4b8a6016 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CONV2D_INFER_H +#define MINDSPORE_NNACL_CONV2D_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int Conv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/crop_and_resize_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_and_resize_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/crop_and_resize_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_and_resize_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_and_resize_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_and_resize_infer.h new file mode 100644 index 0000000000..3cb45b524e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_and_resize_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CROP_AND_RESIZE_INFER_H +#define MINDSPORE_NNACL_CROP_AND_RESIZE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int CropAndResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CROP_AND_RESIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/crop_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/crop_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_infer.h new file mode 100644 index 0000000000..0691e0e1be --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/crop_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CROP_INFER_H +#define MINDSPORE_NNACL_CROP_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/crop_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int CropInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CROP_INFER_H diff --git a/mindspore/lite/nnacl/infer/custom_extract_features_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_extract_features_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/custom_extract_features_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_extract_features_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_extract_features_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_extract_features_infer.h new file mode 100644 index 0000000000..8890561c80 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_extract_features_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CUSTOM_EXTRACT_FEATURES_INFER_H +#define MINDSPORE_NNACL_CUSTOM_EXTRACT_FEATURES_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int CustomExtractFeaturesInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CUSTOM_EXTRACT_FEATURES_INFER_H diff --git a/mindspore/lite/nnacl/infer/custom_normalize_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_normalize_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/custom_normalize_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_normalize_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_normalize_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_normalize_infer.h new file mode 100644 index 0000000000..63558b5b44 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_normalize_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CUSTOM_NORMALIZE_INFER_H +#define MINDSPORE_NNACL_CUSTOM_NORMALIZE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int CustomNormalizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CUSTOM_NORMALIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/custom_predict_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_predict_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/custom_predict_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_predict_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_predict_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_predict_infer.h new file mode 100644 index 0000000000..e78ec87c2b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/custom_predict_infer.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_CUSTOM_PREDICT_INFER_H +#define MINDSPORE_NNACL_CUSTOM_PREDICT_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct CustomPredictParameter { + OpParameter op_parameter_; + int output_num; +} CustomPredictParameter; + +int CustomPredictInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_CUSTOM_PREDICT_INFER_H diff --git a/mindspore/lite/nnacl/infer/deconv2d_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/deconv2d_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/deconv2d_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/deconv2d_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/deconv2d_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/deconv2d_infer.h new file mode 100644 index 0000000000..5bc0689dcd --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/deconv2d_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_DECONV2D_INFER_H +#define MINDSPORE_NNACL_DECONV2D_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int Deconv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_DECONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dedepthwise_conv2d_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dedepthwise_conv2d_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dedepthwise_conv2d_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dedepthwise_conv2d_infer.h new file mode 100644 index 0000000000..3a989d851f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dedepthwise_conv2d_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_DEDEPTHWISE_CONV2D_INFER_H +#define MINDSPORE_NNACL_DEDEPTHWISE_CONV2D_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DeDepthwiseConv2DInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_DEDEPTHWISE_CONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/depth_to_space_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depth_to_space_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/depth_to_space_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depth_to_space_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depth_to_space_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depth_to_space_infer.h new file mode 100644 index 0000000000..d80414d3c3 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depth_to_space_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_DEPTHTOSPACE_INFER_H +#define MINDSPORE_NNACL_DEPTHTOSPACE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/depth_to_space_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DepthToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_DEPTHTOSPACE_INFER_H diff --git a/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depthwise_conv2d_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depthwise_conv2d_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depthwise_conv2d_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depthwise_conv2d_infer.h new file mode 100644 index 0000000000..d3b840a3a2 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/depthwise_conv2d_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_DEPTHWISE_CONV2D_INFER_H +#define MINDSPORE_NNACL_DEPTHWISE_CONV2D_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DepthwiseConv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_DEPTHWISE_CONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/detection_post_process_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/detection_post_process_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/detection_post_process_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/detection_post_process_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/detection_post_process_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/detection_post_process_infer.h new file mode 100644 index 0000000000..6e74e56f7e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/detection_post_process_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_DETECTION_POST_PROCESS_INFER_H +#define MINDSPORE_NNACL_DETECTION_POST_PROCESS_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/detection_post_process_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DetectionPostProcessInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_DETECTION_POST_PROCESS_INFER_H diff --git a/mindspore/lite/nnacl/infer/dropout_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/dropout_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_grad_infer.h new file mode 100644 index 0000000000..df8d9c5a27 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_DROPOUT_GRAD_INFER_H +#define MINDSPORE_NNACL_DROPOUT_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DropoutGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_DROPOUT_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/dropout_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/dropout_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_infer.h new file mode 100644 index 0000000000..c592a9bfa6 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/dropout_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_DROPOUT_INFER_H +#define MINDSPORE_NNACL_DROPOUT_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DropoutInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_DROPOUT_INFER_H diff --git a/mindspore/lite/nnacl/infer/embedding_lookup_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/embedding_lookup_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/embedding_lookup_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/embedding_lookup_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/embedding_lookup_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/embedding_lookup_infer.h new file mode 100644 index 0000000000..581b1cd886 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/embedding_lookup_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_EMBEDDING_LOOKUP_INFER_H +#define MINDSPORE_NNACL_EMBEDDING_LOOKUP_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int EmbeddingLookupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_EMBEDDING_LOOKUP_INFER_H diff --git a/mindspore/lite/nnacl/infer/expand_dims_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/expand_dims_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/expand_dims_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/expand_dims_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/expand_dims_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/expand_dims_infer.h new file mode 100644 index 0000000000..170b76cd07 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/expand_dims_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_EXPAND_DIMS_INFER_H +#define MINDSPORE_NNACL_EXPAND_DIMS_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ExpandDimsInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_EXPAND_DIMS_INFER_H diff --git a/mindspore/lite/nnacl/infer/fft_imag_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_imag_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/fft_imag_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_imag_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_imag_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_imag_infer.h new file mode 100644 index 0000000000..8dcb2bb726 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_imag_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FFT_IMAG_INFER_H +#define MINDSPORE_NNACL_FFT_IMAG_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FftImagInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FFT_IMAG_INFER_H diff --git a/mindspore/lite/nnacl/infer/fft_real_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_real_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/fft_real_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_real_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_real_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_real_infer.h new file mode 100644 index 0000000000..d3e8930325 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fft_real_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FFT_REAL_INFER_H +#define MINDSPORE_NNACL_FFT_REAL_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FftRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FFT_REAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/fill_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fill_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/fill_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fill_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fill_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fill_infer.h new file mode 100644 index 0000000000..9bc39a5e8a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fill_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FILL_INFER_H +#define MINDSPORE_NNACL_FILL_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FillInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FILL_INFER_H diff --git a/mindspore/lite/nnacl/infer/flatten_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/flatten_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_grad_infer.h new file mode 100644 index 0000000000..503eecbccf --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FLATTEN_GRAD_INFER_INFER_H +#define MINDSPORE_NNACL_FLATTEN_GRAD_INFER_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FlattenGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FLATTEN_GRAD_INFER_INFER_H diff --git a/mindspore/lite/nnacl/infer/flatten_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/flatten_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_infer.h new file mode 100644 index 0000000000..755c7b3664 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/flatten_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FLATTEN_INFER_H +#define MINDSPORE_NNACL_FLATTEN_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FlattenInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FLATTEN_INFER_H diff --git a/mindspore/lite/nnacl/infer/full_connection_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/full_connection_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/full_connection_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/full_connection_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/full_connection_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/full_connection_infer.h new file mode 100644 index 0000000000..68cc05a5a3 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/full_connection_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FULL_CONNECTION_INFER_H +#define MINDSPORE_NNACL_FULL_CONNECTION_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/matmul_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FullConnectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FULL_CONNECTION_INFER_H diff --git a/mindspore/lite/nnacl/infer/fused_batchnorm_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fused_batchnorm_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/fused_batchnorm_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fused_batchnorm_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fused_batchnorm_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fused_batchnorm_infer.h new file mode 100644 index 0000000000..5d6ccd2f6e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/fused_batchnorm_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_FUSED_BATCHNORM_INFER_H +#define MINDSPORE_NNACL_FUSED_BATCHNORM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FusedBatchNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_FUSED_BATCHNORM_INFER_H diff --git a/mindspore/lite/nnacl/infer/gather_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/gather_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_infer.h new file mode 100644 index 0000000000..0c4d763c6b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_GATHER_INFER_H +#define MINDSPORE_NNACL_GATHER_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/gather_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int GatherInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_GATHER_INFER_H diff --git a/mindspore/lite/nnacl/infer/gather_nd_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_nd_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/gather_nd_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_nd_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_nd_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_nd_infer.h new file mode 100644 index 0000000000..cd90d84bed --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gather_nd_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_GATHER_ND_INFER_H +#define MINDSPORE_NNACL_GATHER_ND_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/gatherNd_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int GatherNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_GATHER_ND_INFER_H diff --git a/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.h new file mode 100644 index 0000000000..e9cf444875 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_GROUP_CONV2D_GRAD_INPUT_INFER_H +#define MINDSPORE_NNACL_GROUP_CONV2D_GRAD_INPUT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int GroupConv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_GROUP_CONV2D_GRAD_INPUT_INFER_H diff --git a/mindspore/lite/nnacl/infer/gru_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gru_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/gru_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gru_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gru_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gru_infer.h new file mode 100644 index 0000000000..14fc084aaa --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/gru_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_GRU_INFER_H +#define MINDSPORE_NNACL_GRU_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/gru_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int GruInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_GRU_INFER_H diff --git a/mindspore/lite/nnacl/infer/hashtable_lookup_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/hashtable_lookup_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/hashtable_lookup_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/hashtable_lookup_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/hashtable_lookup_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/hashtable_lookup_infer.h new file mode 100644 index 0000000000..7e0c034972 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/hashtable_lookup_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_HASHTABLE_LOOKUP_INFER_H +#define MINDSPORE_NNACL_HASHTABLE_LOOKUP_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int HashtableLoopupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_HASHTABLE_LOOKUP_INFER_H diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer.h new file mode 100644 index 0000000000..cac9621973 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer.h @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INFER_INFER_H_ +#define MINDSPORE_NNACL_INFER_INFER_H_ + +#include "nnacl/tensor_c.h" +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (*InferShape)(const TensorC *const *inputs, size_t input_size, TensorC **outputs, size_t output_size, + OpParameter *parameter); + +InferShape GetInferFunc(int prim_type); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INFER_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/infer_register.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer_register.c similarity index 100% rename from mindspore/lite/nnacl/infer/infer_register.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer_register.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer_register.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer_register.h new file mode 100644 index 0000000000..e968ba15c2 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer_register.h @@ -0,0 +1,233 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INFER_INFER_REGISTER_H_ +#define MINDSPORE_NNACL_INFER_INFER_REGISTER_H_ + +#include "nnacl/tensor_c.h" +#include "nnacl/op_base.h" +#include "nnacl/infer/infer.h" + +#ifdef __cplusplus +extern "C" { +#endif +enum PrimType { + PrimType_NONE = 0, + PrimType_Abs = 1, + PrimType_Activation = 2, + PrimType_ActivationGrad = 3, + PrimType_Adam = 4, + PrimType_AddFusion = 5, + PrimType_AdderFusion = 6, + PrimType_AddGrad = 7, + PrimType_AddN = 8, + PrimType_All = 9, + PrimType_ApplyMomentum = 10, + PrimType_ArgMaxFusion = 11, + PrimType_ArgMinFusion = 12, + PrimType_Assert = 13, + PrimType_Assign = 14, + PrimType_AssignAdd = 15, + PrimType_AudioSpectrogram = 16, + PrimType_AvgPoolFusion = 17, + PrimType_AvgPoolGrad = 18, + PrimType_BatchNorm = 19, + PrimType_BatchNormGrad = 20, + PrimType_BatchToSpace = 21, + PrimType_BatchToSpaceND = 22, + PrimType_BiasAdd = 23, + PrimType_BinaryCrossEntropy = 24, + PrimType_BinaryCrossEntropyGrad = 25, + PrimType_BiasAddGrad = 26, + PrimType_BroadcastTo = 27, + PrimType_Cast = 28, + PrimType_Ceil = 29, + PrimType_Clip = 30, + PrimType_Concat = 31, + PrimType_ControlDepend = 32, + PrimType_Conv2DBackpropFilterFusion = 33, + PrimType_Conv2DBackpropInputFusion = 34, + PrimType_Conv2DFusion = 35, + PrimType_Conv2dTransposeFusion = 36, + PrimType_Cos = 37, + PrimType_ConstantOfShape = 38, + PrimType_Crop = 39, + PrimType_CustomExtractFeatures = 40, + PrimType_CustomNormalize = 41, + PrimType_CustomPredict = 42, + PrimType_DeConv2DGradFilter = 43, + PrimType_Depend = 44, + PrimType_DepthToSpace = 45, + PrimType_DetectionPostProcess = 46, + PrimType_DivFusion = 47, + PrimType_DivGrad = 48, + PrimType_Dropout = 49, + PrimType_DropoutGrad = 50, + PrimType_Elu = 51, + PrimType_Eltwise = 52, + PrimType_Equal = 53, + PrimType_EmbeddingLookupFusion = 54, + PrimType_ExpFusion = 55, + PrimType_ExpandDims = 56, + PrimType_FakeQuantWithMinMaxVars = 57, + PrimType_FakeQuantWithMinMaxVarsPerChannel = 58, + PrimType_FftReal = 
59, + PrimType_FftImag = 60, + PrimType_Flatten = 61, + PrimType_FlattenGrad = 62, + PrimType_Floor = 63, + PrimType_FloorDiv = 64, + PrimType_FloorMod = 65, + PrimType_Fill = 66, + PrimType_FullConnection = 67, + PrimType_FusedBatchNorm = 68, + PrimType_Gather = 69, + PrimType_GatherNd = 70, + PrimType_Greater = 71, + PrimType_GreaterEqual = 72, + PrimType_HashtableLookup = 73, + PrimType_InstanceNorm = 74, + PrimType_LayerNormFusion = 75, + PrimType_LeakyRelu = 76, + PrimType_Less = 77, + PrimType_LessEqual = 78, + PrimType_Log = 79, + PrimType_LogGrad = 80, + PrimType_LogicalAnd = 81, + PrimType_LogicalNot = 82, + PrimType_LogicalOr = 83, + PrimType_LpNormalization = 84, + PrimType_LRN = 85, + PrimType_LshProjection = 86, + PrimType_LSTM = 87, + PrimType_L2NormalizeFusion = 88, + PrimType_MatMul = 89, + PrimType_Maximum = 90, + PrimType_MaximumGrad = 91, + PrimType_MaxPoolFusion = 92, + PrimType_MaxPoolGrad = 93, + PrimType_Merge = 94, + PrimType_Mfcc = 95, + PrimType_Minimum = 96, + PrimType_MinimumGrad = 97, + PrimType_Mod = 98, + PrimType_MulFusion = 99, + PrimType_MulGrad = 100, + PrimType_Neg = 101, + PrimType_NegGrad = 102, + PrimType_NotEqual = 103, + PrimType_NonMaxSuppression = 104, + PrimType_OneHot = 105, + PrimType_OnesLike = 106, + PrimType_PadFusion = 107, + PrimType_PartialFusion = 108, + PrimType_PowerGrad = 109, + PrimType_PowFusion = 110, + PrimType_PriorBox = 111, + PrimType_PReLUFusion = 112, + PrimType_QuantDTypeCast = 113, + PrimType_Rank = 114, + PrimType_Range = 115, + PrimType_Reciprocal = 116, + PrimType_RealDiv = 117, + PrimType_ReduceFusion = 118, + PrimType_Reshape = 119, + PrimType_Resize = 120, + PrimType_ReverseSequence = 121, + PrimType_ReverseV2 = 122, + PrimType_Rfft = 123, + PrimType_ROIPooling = 124, + PrimType_Round = 125, + PrimType_Rsqrt = 126, + PrimType_ScaleFusion = 127, + PrimType_ScatterNd = 128, + PrimType_SGD = 129, + PrimType_Shape = 130, + PrimType_SigmoidCrossEntropyWithLogits = 131, + 
PrimType_SigmoidCrossEntropyWithLogitsGrad = 132, + PrimType_Sin = 133, + PrimType_SkipGram = 134, + PrimType_SliceFusion = 135, + PrimType_SmoothL1Loss = 136, + PrimType_SmoothL1LossGrad = 137, + PrimType_Softmax = 138, + PrimType_SoftmaxCrossEntropyWithLogits = 139, + PrimType_SpaceToBatch = 140, + PrimType_SpaceToBatchND = 141, + PrimType_SpaceToDepth = 142, + PrimType_SparseSoftmaxCrossEntropyWithLogits = 143, + PrimType_SparseToDense = 144, + PrimType_Split = 145, + PrimType_Sqrt = 146, + PrimType_Squeeze = 147, + PrimType_Square = 148, + PrimType_SquaredDifference = 149, + PrimType_Stack = 150, + PrimType_StridedSlice = 151, + PrimType_SubFusion = 152, + PrimType_SubGrad = 153, + PrimType_Switch = 154, + PrimType_TensorListFromTensor = 155, + PrimType_TensorListGetItem = 156, + PrimType_TensorListReserve = 157, + PrimType_TensorListSetItem = 158, + PrimType_TensorListStack = 159, + PrimType_TileFusion = 160, + PrimType_TopKFusion = 161, + PrimType_Transpose = 162, + PrimType_Unique = 163, + PrimType_UnsortedSegmentSum = 164, + PrimType_Unsqueeze = 165, + PrimType_Unstack = 166, + PrimType_While = 167, + PrimType_Where = 168, + PrimType_ZerosLike = 169, + PrimType_Select = 170, + PrimType_If = 171, + PrimType_GRU = 172, + PrimType_NonZero = 173, + PrimType_InvertPermutation = 174, + PrimType_Size = 175, + PrimType_RandomStandardNormal = 176, + PrimType_CropAndResize = 177, + PrimType_Erf = 178, + PrimType_StridedSliceGrad = 179, + PrimType_IsFinite = 180, + PrimType_LinSpace = 181, + PrimType_UniformReal = 182, + PrimType_AbsGrad = 183, + PrimType_RsqrtGrad = 184, + PrimType_SqrtGrad = 185, + PrimType_LayerNormGrad = 186, + PrimType_ResizeGrad = 187, + PrimType_Splice = 188, + PrimType_LogSoftmax = 189, + PrimType_MIN = PrimType_NONE, + PrimType_MAX = PrimType_LogSoftmax + 1 +}; + +void RegInfer(int prim_type, InferShape func); + +#ifdef MS_COMPILE_IOS +#define REG_INFER(op, type, func) \ + void _##op##type() { RegInfer(type, func); } +#else +#define 
REG_INFER(op, type, func) \ + __attribute__((constructor(102))) void Reg##op##Infer() { RegInfer(type, func); } +#endif +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INFER_INFER_REGISTER_H_ diff --git a/mindspore/lite/nnacl/infer/invert_permutation_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/invert_permutation_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/invert_permutation_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/invert_permutation_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/invert_permutation_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/invert_permutation_infer.h new file mode 100644 index 0000000000..580ba06fbc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/invert_permutation_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INVERT_PERMUTATION_INFER_H +#define MINDSPORE_NNACL_INVERT_PERMUTATION_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int InvertPermutationInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INVERT_PERMUTATION_INFER_H diff --git a/mindspore/lite/nnacl/infer/layer_norm_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/layer_norm_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_grad_infer.h new file mode 100644 index 0000000000..d554b3cd48 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INFER_LAYER_NORM_GRAD_INFER_H_ +#define MINDSPORE_NNACL_INFER_LAYER_NORM_GRAD_INFER_H_ + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LayerNormGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INFER_LAYER_NORM_GRAD_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/layer_norm_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/layer_norm_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_infer.h new file mode 100644 index 0000000000..03fdc42936 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/layer_norm_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_LAYER_NORM_INFER_H +#define MINDSPORE_NNACL_LAYER_NORM_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/layer_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LayerNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_LAYER_NORM_INFER_H diff --git a/mindspore/lite/nnacl/infer/lin_space_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lin_space_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/lin_space_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lin_space_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lin_space_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lin_space_infer.h new file mode 100644 index 0000000000..18a0d015d5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lin_space_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_LIN_SPACE_INFER_H +#define MINDSPORE_NNACL_LIN_SPACE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LinSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_LIN_SPACE_INFER_H diff --git a/mindspore/lite/nnacl/infer/log_softmax_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/log_softmax_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/log_softmax_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/log_softmax_infer.c diff --git a/mindspore/lite/nnacl/infer/log_softmax_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/log_softmax_infer.h similarity index 100% rename from mindspore/lite/nnacl/infer/log_softmax_infer.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/log_softmax_infer.h diff --git a/mindspore/lite/nnacl/infer/lsh_projection_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lsh_projection_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/lsh_projection_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lsh_projection_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lsh_projection_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lsh_projection_infer.h new file mode 100644 index 0000000000..24017cf793 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lsh_projection_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_LSH_PROJECTION_INFER_H +#define MINDSPORE_NNACL_LSH_PROJECTION_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/lsh_projection_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LshProjectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_LSH_PROJECTION_INFER_H diff --git a/mindspore/lite/nnacl/infer/lstm_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lstm_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/lstm_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lstm_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lstm_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lstm_infer.h new file mode 100644 index 0000000000..c361619cd1 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/lstm_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_LSTM_INFER_H +#define MINDSPORE_NNACL_LSTM_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/lstm_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LstmInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_LSTM_INFER_H diff --git a/mindspore/lite/nnacl/infer/matmul_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/matmul_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/matmul_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/matmul_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/matmul_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/matmul_infer.h new file mode 100644 index 0000000000..55f6b048b1 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/matmul_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_MATMUL_INFER_H +#define MINDSPORE_NNACL_MATMUL_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/matmul_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int MatmulInferShape(const TensorC *const *const inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_MATMUL_INFER_H diff --git a/mindspore/lite/nnacl/infer/max_min_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/max_min_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.h new file mode 100644 index 0000000000..5a12f49dc4 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INFER_MAX_MIN_GRAD_INFER_H_ +#define MINDSPORE_NNACL_INFER_MAX_MIN_GRAD_INFER_H_ + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int MaxMinGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INFER_MAX_MIN_GRAD_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/mean_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mean_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/mean_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mean_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mean_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mean_infer.h new file mode 100644 index 0000000000..293076c4af --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mean_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_MEAN_INFER_H +#define MINDSPORE_NNACL_MEAN_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/reduce_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int MeanInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_MEAN_INFER_H diff --git a/mindspore/lite/nnacl/infer/merge_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/merge_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/merge_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/merge_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/merge_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/merge_infer.h new file mode 100644 index 0000000000..372138d0f1 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/merge_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_MERGE_INFER_H +#define MINDSPORE_NNACL_MERGE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int MergeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_MERGE_INFER_H diff --git a/mindspore/lite/nnacl/infer/mfcc_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mfcc_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/mfcc_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mfcc_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mfcc_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mfcc_infer.h new file mode 100644 index 0000000000..5f7b4b0bc5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/mfcc_infer.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_MFCC_INFER_H +#define MINDSPORE_NNACL_MFCC_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct MfccParameter { + OpParameter op_parameter_; + int dct_coeff_num_; +} MfccParameter; + +int MfccInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_MFCC_INFER_H diff --git a/mindspore/lite/nnacl/infer/non_max_suppression_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/non_max_suppression_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/non_max_suppression_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/non_max_suppression_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/non_max_suppression_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/non_max_suppression_infer.h new file mode 100644 index 0000000000..19575888dd --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/non_max_suppression_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_NON_MAX_SUPPRESSION_INFER_H +#define MINDSPORE_NNACL_NON_MAX_SUPPRESSION_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int NonMaxSuppressionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_NON_MAX_SUPPRESSION_INFER_H diff --git a/mindspore/lite/nnacl/infer/one_hot_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/one_hot_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/one_hot_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/one_hot_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/one_hot_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/one_hot_infer.h new file mode 100644 index 0000000000..b5ed83ec0a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/one_hot_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ONE_HOT_INFER_H +#define MINDSPORE_NNACL_ONE_HOT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/one_hot_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int OneHotInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ONE_HOT_INFER_H diff --git a/mindspore/lite/nnacl/infer/pad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/pad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pad_infer.h new file mode 100644 index 0000000000..e277b9865e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pad_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_PAD_INFER_H +#define MINDSPORE_NNACL_PAD_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/pad_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PadInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_PAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/partial_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/partial_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/partial_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/partial_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/partial_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/partial_infer.h new file mode 100644 index 0000000000..f4a9702956 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/partial_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_PARTIAL_INFER_H +#define MINDSPORE_NNACL_PARTIAL_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PartialInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_PARTIAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/pooling_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/pooling_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_grad_infer.h new file mode 100644 index 0000000000..016ece352b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_grad_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_POOLING_GRAD_INFER_H +#define MINDSPORE_NNACL_POOLING_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/pooling_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PoolingGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_POOLING_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/pooling_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/pooling_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_infer.h new file mode 100644 index 0000000000..c4a9369b58 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/pooling_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_POOLING_INFER_H +#define MINDSPORE_NNACL_POOLING_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/pooling_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_POOLING_INFER_H diff --git a/mindspore/lite/nnacl/infer/power_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/power_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/power_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/power_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/power_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/power_infer.h new file mode 100644 index 0000000000..23ca514562 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/power_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_POWER_INFER_H +#define MINDSPORE_NNACL_POWER_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/power_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PowerInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_POWER_INFER_H diff --git a/mindspore/lite/nnacl/infer/prior_box_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/prior_box_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.h new file mode 100644 index 0000000000..80b5af6db8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_PRIOR_BOX_INFER_H +#define MINDSPORE_NNACL_PRIOR_BOX_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/prior_box_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_PRIOR_BOX_INFER_H diff --git a/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/quant_dtype_cast_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/quant_dtype_cast_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/quant_dtype_cast_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/quant_dtype_cast_infer.h new file mode 100644 index 0000000000..bf854ba717 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/quant_dtype_cast_infer.h @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_QUANT_DTYPE_CAST_INFER_H +#define MINDSPORE_NNACL_QUANT_DTYPE_CAST_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct QuantDtypeCastParameter { + OpParameter op_parameter_; + int srcT_; // deprecated + int dstT_; +} QuantDtypeCastParameter; + +int QuantDtypeCastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_QUANT_DTYPE_CAST_INFER_H diff --git a/mindspore/lite/nnacl/infer/random_standard_normal_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/random_standard_normal_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/random_standard_normal_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/random_standard_normal_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/random_standard_normal_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/random_standard_normal_infer.h new file mode 100644 index 0000000000..f769e4b9c2 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/random_standard_normal_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_RANDOM_STANDARD_NORMAL_INFER_H +#define MINDSPORE_NNACL_RANDOM_STANDARD_NORMAL_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int RandomStandardNormalInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_RANDOM_STANDARD_NORMAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/range_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/range_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/range_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/range_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/range_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/range_infer.h new file mode 100644 index 0000000000..8ee07f50ea --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/range_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_RANGE_INFER_H +#define MINDSPORE_NNACL_RANGE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/range_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_RANGE_INFER_H diff --git a/mindspore/lite/nnacl/infer/rank_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rank_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/rank_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rank_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rank_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rank_infer.h new file mode 100644 index 0000000000..ab2d8af3c7 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rank_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_RANK_INFER_H +#define MINDSPORE_NNACL_RANK_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int RankInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_RANK_INFER_H diff --git a/mindspore/lite/nnacl/infer/reduce_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reduce_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/reduce_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reduce_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reduce_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reduce_infer.h new file mode 100644 index 0000000000..abe278b253 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reduce_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_REDUCE_INFER_H +#define MINDSPORE_NNACL_REDUCE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/reduce_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ReduceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_REDUCE_INFER_H diff --git a/mindspore/lite/nnacl/infer/reshape_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/reshape_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.h new file mode 100644 index 0000000000..484eecdcc2 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_RESHAPE_INFER_H +#define MINDSPORE_NNACL_RESHAPE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/reshape_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ReshapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_RESHAPE_INFER_H diff --git a/mindspore/lite/nnacl/infer/resize_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/resize_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/resize_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/resize_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/resize_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/resize_infer.h new file mode 100644 index 0000000000..aeef0a69e1 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/resize_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_RESIZE_INFER_H +#define MINDSPORE_NNACL_RESIZE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/resize_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_RESIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/rfft_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rfft_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/rfft_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rfft_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rfft_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rfft_infer.h new file mode 100644 index 0000000000..19ef61ffee --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/rfft_infer.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_RFFT_INFER_H +#define MINDSPORE_NNACL_RFFT_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct RfftParameter { + OpParameter op_parameter_; + int fft_length_; +} RfftParameter; + +int RfftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_RFFT_INFER_H diff --git a/mindspore/lite/nnacl/infer/roi_pooling_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/roi_pooling_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/roi_pooling_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/roi_pooling_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/roi_pooling_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/roi_pooling_infer.h new file mode 100644 index 0000000000..4f730eb64b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/roi_pooling_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_ROI_POOLING_INFER_H +#define MINDSPORE_NNACL_ROI_POOLING_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/roi_pooling_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ROIPoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_ROI_POOLING_INFER_H diff --git a/mindspore/lite/nnacl/infer/scatter_nd_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/scatter_nd_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/scatter_nd_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/scatter_nd_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/scatter_nd_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/scatter_nd_infer.h new file mode 100644 index 0000000000..699405e831 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/scatter_nd_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SCATTER_ND_INFER_H +#define MINDSPORE_NNACL_SCATTER_ND_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ScatterNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SCATTER_ND_INFER_H diff --git a/mindspore/lite/nnacl/infer/select_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/select_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/select_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/select_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/select_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/select_infer.h new file mode 100644 index 0000000000..0f94c22e8d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/select_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SELECT_INFER_H +#define MINDSPORE_NNACL_SELECT_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SELECT_INFER_H diff --git a/mindspore/lite/nnacl/infer/sgd_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sgd_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/sgd_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sgd_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sgd_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sgd_infer.h new file mode 100644 index 0000000000..cb32363bce --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sgd_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SGD_INFER_H +#define MINDSPORE_NNACL_SGD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SgdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SGD_INFER_H diff --git a/mindspore/lite/nnacl/infer/shape_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/shape_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/shape_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/shape_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/shape_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/shape_infer.h new file mode 100644 index 0000000000..99dd285b9a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/shape_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SHAPE_INFER_H +#define MINDSPORE_NNACL_SHAPE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SHAPE_INFER_H diff --git a/mindspore/lite/nnacl/infer/size_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/size_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/size_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/size_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/size_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/size_infer.h new file mode 100644 index 0000000000..23481b6920 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/size_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SIZE_INFER_H +#define MINDSPORE_NNACL_SIZE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/skip_gram_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/skip_gram_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/skip_gram_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/skip_gram_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/skip_gram_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/skip_gram_infer.h new file mode 100644 index 0000000000..7af14f57ff --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/skip_gram_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SKIP_GRAM_INFER_H +#define MINDSPORE_NNACL_SKIP_GRAM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SkipGramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SKIP_GRAM_INFER_H diff --git a/mindspore/lite/nnacl/infer/slice_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/slice_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.h new file mode 100644 index 0000000000..50e06cd487 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SLICE_INFER_H +#define MINDSPORE_NNACL_SLICE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SLICE_INFER_H diff --git a/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_cross_entropy_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_cross_entropy_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_cross_entropy_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_cross_entropy_infer.h new file mode 100644 index 0000000000..c5d89519ef --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_cross_entropy_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SOFTMAX_CROSS_ENTROPY_INFER_H +#define MINDSPORE_NNACL_SOFTMAX_CROSS_ENTROPY_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SoftmaxCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SOFTMAX_CROSS_ENTROPY_INFER_H diff --git a/mindspore/lite/nnacl/infer/softmax_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/softmax_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_infer.h new file mode 100644 index 0000000000..9ce561666f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/softmax_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SOFTMAX_INFER_H +#define MINDSPORE_NNACL_SOFTMAX_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SoftMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SOFTMAX_INFER_H diff --git a/mindspore/lite/nnacl/infer/space_to_batch_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/space_to_batch_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_infer.h new file mode 100644 index 0000000000..393a958c68 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SPACE_TO_BATCH_INFER_H +#define MINDSPORE_NNACL_SPACE_TO_BATCH_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/space_to_batch_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SpaceToBatchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SPACE_TO_BATCH_INFER_H diff --git a/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_nd_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_nd_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_nd_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_nd_infer.h new file mode 100644 index 0000000000..d63b46d480 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_batch_nd_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SPACE_TO_BATCH_ND_INFER_H +#define MINDSPORE_NNACL_SPACE_TO_BATCH_ND_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/space_to_batch_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SpaceToBatchNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SPACE_TO_BATCH_ND_INFER_H diff --git a/mindspore/lite/nnacl/infer/space_to_depth_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_depth_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/space_to_depth_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_depth_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_depth_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_depth_infer.h new file mode 100644 index 0000000000..809a637ccc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/space_to_depth_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SPACE_TO_DEPTH_INFER_H +#define MINDSPORE_NNACL_SPACE_TO_DEPTH_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/space_to_depth_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SpaceToDepthInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SPACE_TO_DEPTH_INFER_H diff --git a/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h new file mode 100644 index 0000000000..614d1436af --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INFER_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H_ +#define MINDSPORE_NNACL_INFER_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H_ + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SparseSoftmaxCrossEntropyWithLogitsInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INFER_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/sparse_to_dense_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_to_dense_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/sparse_to_dense_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_to_dense_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_to_dense_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_to_dense_infer.h new file mode 100644 index 0000000000..a520734e56 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/sparse_to_dense_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SPARSE_TO_DENSE_INFER_H +#define MINDSPORE_NNACL_SPARSE_TO_DENSE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SparseToDenseInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SPARSE_TO_DENSE_INFER_H diff --git a/mindspore/lite/nnacl/infer/splice_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/splice_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/splice_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/splice_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/splice_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/splice_infer.h new file mode 100644 index 0000000000..54b6d3a1e9 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/splice_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INFER_SPLICE_INFER_H_ +#define MINDSPORE_NNACL_INFER_SPLICE_INFER_H_ +#include "nnacl/infer/common_infer.h" +#include "nnacl/splice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SpliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INFER_SPLICE_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/split_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/split_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.h new file mode 100644 index 0000000000..67733fa841 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SPLIT_INFER_H +#define MINDSPORE_NNACL_SPLIT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/split_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SplitInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SPLIT_INFER_H diff --git a/mindspore/lite/nnacl/infer/squeeze_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/squeeze_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.h new file mode 100644 index 0000000000..6505b573c9 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SQUEEZE_INFER_H +#define MINDSPORE_NNACL_SQUEEZE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/squeeze_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SQUEEZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/stack_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/stack_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/stack_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/stack_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/stack_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/stack_infer.h new file mode 100644 index 0000000000..d50a0ccb68 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/stack_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_STACK_INFER_H +#define MINDSPORE_NNACL_STACK_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/stack_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int StackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_STACK_INFER_H diff --git a/mindspore/lite/nnacl/infer/strided_slice_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_grad_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/strided_slice_grad_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_grad_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_grad_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_grad_infer.h new file mode 100644 index 0000000000..db25253124 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_grad_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_STRIDED_SLICE_GRAD_INFER_H +#define MINDSPORE_NNACL_STRIDED_SLICE_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/strided_slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int StridedSliceGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_STRIDED_SLICE_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/strided_slice_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/strided_slice_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.h new file mode 100644 index 0000000000..de3321b444 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_STRIDED_SLICE_INFER_H +#define MINDSPORE_NNACL_STRIDED_SLICE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/strided_slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_STRIDED_SLICE_INFER_H diff --git a/mindspore/lite/nnacl/infer/switch_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/switch_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/switch_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/switch_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/switch_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/switch_infer.h new file mode 100644 index 0000000000..bac22b3a16 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/switch_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_SWITCH_INFER_H +#define MINDSPORE_NNACL_SWITCH_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_SWITCH_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.h new file mode 100644 index 0000000000..f9d9a09167 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_TENSORLIST_FROMTENSOR_INFER_H +#define MINDSPORE_NNACL_TENSORLIST_FROMTENSOR_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_TENSORLIST_FROMTENSOR_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_getitem_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_getitem_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_getitem_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_getitem_infer.h new file mode 100644 index 0000000000..107fdd46e3 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_getitem_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_TENSORLIST_GETITEM_INFER_H +#define MINDSPORE_NNACL_TENSORLIST_GETITEM_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/tensorlist_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_TENSORLIST_GETITEM_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_reserve_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_reserve_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_reserve_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_reserve_infer.h new file mode 100644 index 0000000000..f1c5ce4cd5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_reserve_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_TENSORLIST_RESERVE_INFER_H +#define MINDSPORE_NNACL_TENSORLIST_RESERVE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_TENSORLIST_RESERVE_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.h new file mode 100644 index 0000000000..a73773c5d9 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_TENSORLIST_SETITEM_INFER_H +#define MINDSPORE_NNACL_TENSORLIST_SETITEM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_TENSORLIST_SETITEM_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_stack_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/tensorlist_stack_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_stack_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_stack_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_stack_infer.h new file mode 100644 index 0000000000..ad991d66a6 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_stack_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_TENSORLIST_STACK_INFER_H +#define MINDSPORE_NNACL_TENSORLIST_STACK_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_TENSORLIST_STACK_INFER_H diff --git a/mindspore/lite/nnacl/infer/tile_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/tile_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.h new file mode 100644 index 0000000000..26d67610d1 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_TILE_INFER_H +#define MINDSPORE_NNACL_TILE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/base/tile_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_TILE_INFER_H diff --git a/mindspore/lite/nnacl/infer/topk_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/topk_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/topk_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/topk_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/topk_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/topk_infer.h new file mode 100644 index 0000000000..c5f111ee25 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/topk_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_TOPK_INFER_H +#define MINDSPORE_NNACL_TOPK_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/topk_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TopKInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_TOPK_INFER_H diff --git a/mindspore/lite/nnacl/infer/transpose_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/transpose_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/transpose_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/transpose_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/transpose_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/transpose_infer.h new file mode 100644 index 0000000000..b29efc2fdc --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/transpose_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_TRANSPOSE_INFER_H +#define MINDSPORE_NNACL_TRANSPOSE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/transpose.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TransposeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_TRANSPOSE_INFER_H diff --git a/mindspore/lite/nnacl/infer/uniform_real_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/uniform_real_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/uniform_real_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/uniform_real_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/uniform_real_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/uniform_real_infer.h new file mode 100644 index 0000000000..7030b609d5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/uniform_real_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_UNIFORM_REAL_INFER_H +#define MINDSPORE_NNACL_UNIFORM_REAL_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int UniformRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_UNIFORM_REAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/unique_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unique_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/unique_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unique_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unique_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unique_infer.h new file mode 100644 index 0000000000..1c99b86fbf --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unique_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_UNIQUE_INFER_H +#define MINDSPORE_NNACL_UNIQUE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int UniqueInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_UNIQUE_INFER_H diff --git a/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsorted_segment_sum_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsorted_segment_sum_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsorted_segment_sum_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsorted_segment_sum_infer.h new file mode 100644 index 0000000000..1382d611e4 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsorted_segment_sum_infer.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_UNSORTED_SEGMENT_SUM_INFER_H +#define MINDSPORE_NNACL_UNSORTED_SEGMENT_SUM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct UnsortedSegmentSumParameter { + OpParameter op_parameter_; + int segments_num_; +} UnsortedSegmentSumParameter; + +int UnsortedSegmentSumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_UNSORTED_SEGMENT_SUM_INFER_H diff --git a/mindspore/lite/nnacl/infer/unsqueeze_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsqueeze_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/unsqueeze_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsqueeze_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsqueeze_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsqueeze_infer.h new file mode 100644 index 0000000000..73cf816275 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unsqueeze_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_UNSQUEEZE_INFER_H +#define MINDSPORE_NNACL_UNSQUEEZE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/unsqueeze_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int UnsqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_UNSQUEEZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/unstack_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unstack_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/unstack_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unstack_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unstack_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unstack_infer.h new file mode 100644 index 0000000000..a97da806ff --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/unstack_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_UNSTACK_INFER_H +#define MINDSPORE_NNACL_UNSTACK_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/unstack_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int UnstackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_UNSTACK_INFER_H diff --git a/mindspore/lite/nnacl/infer/where_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/where_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/where_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/where_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/where_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/where_infer.h new file mode 100644 index 0000000000..5ce524675a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/where_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_WHERE_INFER_H +#define MINDSPORE_NNACL_WHERE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int WhereInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_WHERE_INFER_H diff --git a/mindspore/lite/nnacl/infer/while_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/while_infer.c similarity index 100% rename from mindspore/lite/nnacl/infer/while_infer.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/while_infer.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/while_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/while_infer.h new file mode 100644 index 0000000000..9dd0be2a4d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/while_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_WHILE_INFER_H +#define MINDSPORE_NNACL_WHILE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int WhileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_WHILE_INFER_H diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/instance_norm_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/instance_norm_parameter.h new file mode 100644 index 0000000000..142f968f6c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/instance_norm_parameter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INSTANCE_NORM_PARAMETER_H_ +#define MINDSPORE_NNACL_INSTANCE_NORM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct InstanceNormParameter { + // Primitive parameter + OpParameter op_parameter_; + float epsilon_; + // shape correlative + int batch_; + int channel_; + int inner_size_; +} InstanceNormParameter; + +#endif // MINDSPORE_NNACL_INSTANCE_NORM_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/int8/add_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/add_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/add_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/add_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/add_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/add_int8.h new file mode 100644 index 0000000000..ae87ad550b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/add_int8.h @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_ADD_INT8_H_ +#define MINDSPORE_NNACL_ADD_INT8_H_ + +#ifdef ENABLE_AVX +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/arithmetic.h" +#include "nnacl/int8/arithmetic_int8.h" + +typedef struct AddQuantQrgs { + int32_t zp_; + int32_t left_shift_; + int32_t right_shift_; + int32_t multiplier_; +} AddQuantQrgs; + +typedef struct AddQuantParameter { + int left_shift_; + int32_t min_; + int32_t max_; + + AddQuantQrgs in0_args_; + AddQuantQrgs in1_args_; + + int32_t out_zp_; + int32_t out_left_shift_; + int32_t out_right_shift_; + int32_t out_multiplier_; +} AddQuantParameter; + +#ifdef __cplusplus +extern "C" { +#endif + +void AddInt8(const int8_t *input0, const int8_t *input1, int8_t *output, int size, AddQuantParameter *params); + +void AddOptInt8(const int8_t *ptr_in, const int8_t element_in, int8_t *output, int size, AddQuantParameter *params, + AddQuantQrgs *ptr_args, AddQuantQrgs *ele_args); + +int ElementAddInt8(const int8_t *in0, const int8_t *in1, int8_t *out, int size); + +int BroadcastAddInt8(const int8_t *in0, const int8_t *in1, int8_t *tile_in0, int8_t *tile_in1, int8_t *out, int size, + ArithmeticParameter *param); + +#ifdef ENABLE_AVX +void AddInt8_AVX2(const int8_t *input0, const int8_t *input1, int8_t *output, int size, AddQuantParameter *params); + +void AddOptInt8_AVX2(const int8_t *ptr_in, const int8_t element_in, int8_t *output, int size, AddQuantParameter *params, + AddQuantQrgs *ptr_args, AddQuantQrgs *ele_args); +#endif +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_ADD_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/arg_min_max_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arg_min_max_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/arg_min_max_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arg_min_max_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arg_min_max_int8.h 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arg_min_max_int8.h new file mode 100644 index 0000000000..c4770398c9 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arg_min_max_int8.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_INT8_ARG_MIN_MAX_INT8_H_ +#define MINDSPORE_NNACL_INT8_ARG_MIN_MAX_INT8_H_ + +#include "nnacl/arg_min_max_parameter.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void Int8ArgMinMaxQuant(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); +void Int8ArgMinMaxDim0(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); +void Int8ArgMinMaxDim1(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); +void Int8ArgMinMaxDim2(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); +void Int8ArgMinMaxDim3(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, + QuantArg *in_quant, QuantArg *out_quant); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_ARG_MIN_MAX_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/arithmetic_int8.c 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/arithmetic_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_int8.h new file mode 100644 index 0000000000..b97675245c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_int8.h @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INT8_ARITHMETIC_INT8_H_ +#define MINDSPORE_NNACL_INT8_ARITHMETIC_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/base/arithmetic_base.h" + +#ifdef __cplusplus +extern "C" { +#endif +void TileOneDimensionInt8(const int8_t *inData, int8_t *outData, int dim, size_t ndim, const int *inShape, + const int *inStrides, const int *outStrides, const int *multiple); +void TileDimensionsInt8(const int8_t *data0, const int8_t *data1, int8_t *tile_data0, int8_t *tile_data1, + ArithmeticParameter *param); + +int ElementNotEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, + ArithmeticQuantArg *quant_arg); + +int ElementEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, ArithmeticQuantArg *quant_arg); + +int ElementLessInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, ArithmeticQuantArg *quant_arg); + +int ElementLessEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, + ArithmeticQuantArg *quant_arg); + +int ElementGreaterInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, + ArithmeticQuantArg *quant_arg); + +int ElementGreaterEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, + ArithmeticQuantArg *quant_arg); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_ARITHMETIC_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/arithmetic_self_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_self_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/arithmetic_self_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_self_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_self_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_self_int8.h new file mode 100644 index 0000000000..9e991969ca --- /dev/null +++ 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/arithmetic_self_int8.h @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_ARITHMETIC_SELF_INT8_H_ +#define MINDSPORE_NNACL_INT8_ARITHMETIC_SELF_INT8_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int Int8ElementRound(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementFloor(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementCeil(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementAbs(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementSin(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementCos(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementLog(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementSqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementRsqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementSquare(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int 
Int8ElementLogicalNot(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +int Int8ElementReciprocal(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_ARITHMETIC_SELF_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/batch_to_space_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batch_to_space_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/batch_to_space_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batch_to_space_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batch_to_space_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batch_to_space_int8.h new file mode 100644 index 0000000000..a04ad5c465 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batch_to_space_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INT8_BATCH_TO_SPACE_INT8_H_ +#define MINDSPORE_NNACL_INT8_BATCH_TO_SPACE_INT8_H_ +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +void BatchToSpaceNoCropForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, int out_n, + const int *block, QuantArg *in_quant_arg, QuantArg *out_quant_arg); +void BatchToSpaceForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, int out_n, const int *block, + const int *crops, QuantArg *in_quant_arg, QuantArg *out_quant_arg); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_BATCH_TO_SPACE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/batchnorm_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batchnorm_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/batchnorm_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batchnorm_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batchnorm_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batchnorm_int8.h new file mode 100644 index 0000000000..8f92cc31c3 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/batchnorm_int8.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_BATCHNORM_H_ +#define MINDSPORE_NNACL_INT8_BATCHNORM_H_ + +#include "nnacl/op_base.h" +#include "nnacl/batchnorm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void BatchNormInt8(int8_t *output_ptr, const int8_t *input_ptr, const float *alpha_ptr, const float *beta_ptr, + int task_id, BatchNormParameter *param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_BATCHNORM_H_ diff --git a/mindspore/lite/nnacl/int8/common_func_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/common_func_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/common_func_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/common_func_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/common_func_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/common_func_int8.h new file mode 100644 index 0000000000..42371096b9 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/common_func_int8.h @@ -0,0 +1,94 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_COMMON_FUNC_H_ +#define MINDSPORE_NNACL_INT8_COMMON_FUNC_H_ + +#include +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/op_base.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void PostFuncInt8C4(const int32_t *in, const int32_t *bias, int8_t *out, size_t oc, size_t plane, size_t stride, + int32_t multiplier, int32_t left_shift, int32_t right_shift, int32_t zp, int32_t mini, + int32_t maxi); +#ifdef ENABLE_ARM +void ConvDwInt8Row(int32_t *output_ptr, const int8_t *input_ptr, const int16_t *weight_ptr, int num_pixels, + int output_channel, int input_step, int8_t input_zp); +void ConvDwInt8PostAlign4PerChannel(int8_t *dst, int32_t *buffer, int channel4, int32_t output_zp, + int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, int32_t acc_min, + int32_t acc_max); +void ConvDwInt8PostAlign4(int8_t *dst, int32_t *buffer, int num_pixels, int32_t output_zp, int32_t out_multiplier, + int32_t left_shift, int32_t right_shift, int32_t acc_min, int32_t acc_max); +void IndirectGemmInt16to32_8x4(int32_t *dst, const int16_t *src, const int16_t *weight, size_t ksize, size_t ic8, + size_t oc4, size_t offset); +void ConvDwInt8Center(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, size_t height, + size_t width, size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, + size_t in_sh_step, size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, int8_t *in_zp, + int32_t *out_zp, int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, + int32_t *acc_min, int32_t *acc_max); +void DeconvDwInt8Center(int32_t *dst, const int16_t *src, const int16_t *weight, size_t height, size_t width, + size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, + size_t in_sw_step, size_t in_kh_step, size_t in_kw_step); +void DeconvDwInt8Post(int8_t *dst, int32_t *output_buffer, const int32_t *bias, int block_channel, int 
pixel_nums, + int out_multiplier, int left_shift, int right_shift, int32_t out_zp, int32_t acc_min, + int32_t acc_max); +int16x8_t LoadAndAddOffset(int8_t *data, int index, int offset); +int32x4_t ClacScaledInput(int32x4_t input, int32x4_t left_shift_result_vec, int32x4_t input_multiplier_vec, + int32x4_t right_shift_vec); +#endif + +#ifdef ENABLE_ARM32 +void ConvDw3x3Int8BorderPixel(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int height, + int width, int in_kh_step, int in_kw_step, int channel, int8_t in_zp, int32_t out_zp, + int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, int32_t acc_min, + int32_t acc_max, size_t per_channel); +#endif + +#ifdef ENABLE_ARM64 +void PostFuncInt8C4Neon64(const int32_t *in, const int32_t *bias, int8_t *out, size_t oc4div, size_t oc4res, + size_t plane, size_t stride, int32_t multiplier, int32_t left_shift, int32_t right_shift, + int32_t zp, int32_t mini, int32_t maxi); +void ConvDw3x3Int8Neon64(int8_t *output, const int8_t *input, const int16_t *weight, const int32_t *bias, + int input_col_size, int input_row_size, int channel, int output_h, int output_w, int8_t in_zp, + int32_t out_zp, int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, + int32_t acc_min, int32_t acc_max, size_t per_channel); +void ConvDw3x3Int8Stride2(int8_t *output, const int8_t *input, const int16_t *weight, const int32_t *bias, + int input_col_size, int input_row_size, int channel, int output_h, int output_w, int8_t in_zp, + int32_t out_zp, int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, + int32_t acc_min, int32_t acc_max, size_t per_channel); +void ConvDw3x3Int8Corner(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, size_t in_kh_step, + size_t in_kw_step, size_t channel, size_t in_zp, size_t out_zp, int32_t *out_multiplier, + int32_t *left_shift, int32_t *right_shift, size_t acc_min, size_t acc_max, size_t per_channel); +void 
ConvDw3x3Int8Vertical(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, + size_t in_kh_step, size_t in_kw_step, size_t channel, size_t in_zp, size_t out_zp, + int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, size_t acc_min, + size_t acc_max, size_t per_channel); +void ConvDw3x3Int8Horizontal(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, + size_t in_kh_step, size_t in_kw_step, size_t channel, size_t in_zp, size_t out_zp, + int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, size_t acc_min, + size_t acc_max, size_t per_channel); +#endif +#ifdef __cplusplus +} +#endif + +#endif /* MINDSPORE_NNACL_FP32_COMMON_FUNC_H_ */ diff --git a/mindspore/lite/nnacl/int8/concat_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/concat_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/concat_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/concat_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/concat_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/concat_int8.h new file mode 100644 index 0000000000..59136add39 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/concat_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_CONCAT_INT8_H_ +#define MINDSPORE_NNACL_INT8_CONCAT_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/concat_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Int8Concat(int8_t **inputs, int8_t *output_ptr, ConcatParameter *para, int axis, int64_t real_dst_count, + int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_CONCAT_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/conv1x1_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv1x1_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/conv1x1_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv1x1_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv1x1_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv1x1_int8.h new file mode 100644 index 0000000000..f8339b5419 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv1x1_int8.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INT8_CONV1X1_INT8_H_ +#define MINDSPORE_NNACL_INT8_CONV1X1_INT8_H_ + +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/matmul_parameter.h" +#include "nnacl/int8/matmul_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void Conv1x1Int8(const int8_t *packed_input, const int8_t *packed_weight, int8_t *dst, const int32_t *input_sum, + const int32_t *bias, int row, int col, int deep16, int32_t *left_shift, int32_t *right_shift, + int32_t *multiplier, ConvParameter *conv_param, int32_t *filter_zp); +void Conv1x1Int8Opt(const int8_t *packed_input, const int8_t *packed_weight, int8_t *dst, const int32_t *input_sum, + const int32_t *bias, int row, int col, int deep4, int32_t *left_shift, int32_t *right_shift, + int32_t *multiplier, ConvParameter *conv_param, MATMUL_OPT_DP_FUNC matmul_func, int32_t *filter_zp); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_CONV1X1_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/conv3x3_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv3x3_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/conv3x3_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv3x3_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv3x3_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv3x3_int8.h new file mode 100644 index 0000000000..68c60b8bbd --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv3x3_int8.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_INT8_CONV3X3_INT8_H_ +#define MINDSPORE_NNACL_INT8_CONV3X3_INT8_H_ + +#include +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/int8/fixed_point.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/matmul_parameter.h" +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/int8/common_func_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void Conv3x3Int8FilterTransform(const int16_t *weight_data, int16_t *trans_weight, int iC8, int output_channel, + int kernel_plane); + +void Conv3x3Int8(int16_t *input_data, int16_t *transed_weight, const int32_t *bias_data, int8_t *output_data, + int16_t *tile_buffer, int16_t *block_unit_buffer, int32_t *tmp_dst_buffer, int8_t *tmp_out, + int task_id, ConvParameter *conv_param); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_CONV3X3_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/conv_depthwise_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_depthwise_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/conv_depthwise_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_depthwise_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_depthwise_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_depthwise_int8.h new file mode 100644 index 0000000000..9da14fa27b --- /dev/null +++
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_depthwise_int8.h @@ -0,0 +1,49 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_CONV_DEPTHWISE_H_ +#define MINDSPORE_NNACL_INT8_CONV_DEPTHWISE_H_ + +#include "nnacl/conv_parameter.h" +#include "nnacl/fp32/conv_depthwise_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void ConvDwInt8(int8_t *output_data, int32_t *output_row, const int8_t *input_data, const int16_t *weight_data, + const int32_t *bias_data, const ConvParameter *conv_param, int task_id); + +void ConvDw3x3Int8Pad(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data, + const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding); + +void ConvDw3x3Int8(int8_t *output_data, int8_t *buffer, const int8_t *input_data, const int16_t *weight_data, + const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding, + int task_id); + +void ConvDwInt8SW(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data, const int32_t *bias_data, + int8_t *input_zp, int32_t *output_zp, const ConvParameter *conv_param, + const SlidingWindowParam *sliding, int task_id); + +void DeconvDwInt8(int8_t *output_data, int32_t *output_buffer, const int16_t *input_data, const int16_t *weight_data, + const int32_t *bias_data, const ConvParameter *conv_param, const 
SlidingWindowParam *sliding, + int task_id); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_CONV_DEPTHWISE_H_ diff --git a/mindspore/lite/nnacl/int8/conv_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/conv_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_int8.h new file mode 100644 index 0000000000..53455f3735 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/conv_int8.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INT8_CONV_INT8_H_ +#define MINDSPORE_NNACL_INT8_CONV_INT8_H_ + +#include +#ifdef ENABLE_NEON +#include +#endif +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/common_func.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/matmul_parameter.h" +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/int8/common_func_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif +// int8 conv common +void ConvInt8(int8_t *input_data, int8_t *packed_input, int8_t *matmul_input, int8_t *packed_weight, + const int32_t *bias_data, int8_t *output_data, int32_t *filter_zp, int32_t *input_sum, int task_id, + ConvParameter *conv_param, MATMUL_OPT_R_FUNC matmul_func, bool is_optimize); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_CONV_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/crop_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/crop_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/crop_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/crop_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/crop_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/crop_int8.h new file mode 100644 index 0000000000..bbbad23f05 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/crop_int8.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_CROP_INT8_H_ +#define MINDSPORE_NNACL_INT8_CROP_INT8_H_ +#include "nnacl/op_base.h" +#include "nnacl/crop_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Int8Crop(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +void Int8Crop1D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +void Int8Crop2D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +void Int8Crop3D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +void Int8Crop4D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_CROP_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/deconv_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/deconv_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/deconv_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/deconv_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/deconv_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/deconv_int8.h new file mode 100644 index 0000000000..c78c5f6cbe --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/deconv_int8.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_INT8_DECONV_H_ +#define MINDSPORE_NNACL_INT8_DECONV_H_ + +#include +#include "nnacl/pack.h" +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/common_func.h" +#include "nnacl/int8/matmul_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DeConvPackWeightSum(int8_t *weight, int32_t *weight_sum, int32_t input_zp, int32_t filter_zp, int deep16, int col4, + bool suppport_opt); +void DeConvPackInputSum(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16, + bool suppport_opt); +void DeConvWeightTransInt8(int8_t *src, int8_t *dst, int input_channel, int output_channel, int plane, + bool support_optimize_); + +int DeConvInt8(const int8_t *input, const int8_t *weight, int32_t *output, int32_t *weight_sum, int32_t *input_sum, + size_t act_row, size_t act_col, size_t act_deep, ConvParameter *conv_param, + MATMUL_OPT_R4_FUNC matmul_func); +int DeConvPostInt8(const int32_t *src, const int32_t *bias, int32_t *tmp, int8_t *out, int output_channel, + ConvParameter *conv_param, bool support_optimize); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_DECONV_H_ diff --git a/mindspore/lite/nnacl/int8/depth_to_space_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/depth_to_space_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/depth_to_space_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/depth_to_space_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/depth_to_space_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/depth_to_space_int8.h new file mode 100644 index 0000000000..4117b78166 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/depth_to_space_int8.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_INT8_DEPTH_TO_SPACE_INT8_H_ +#define MINDSPORE_NNACL_INT8_DEPTH_TO_SPACE_INT8_H_ + +#include "nnacl/depth_to_space_parameter.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DepthToSpaceForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, DepthToSpaceParameter *param, + QuantArg *in_quant_arg, QuantArg *out_quant_arg); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_DEPTH_TO_SPACE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/div_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/div_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/div_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/div_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/div_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/div_int8.h new file mode 100644 index 0000000000..12b5e4399f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/div_int8.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_DIV_INT8_H_ +#define MINDSPORE_NNACL_INT8_DIV_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/int8/fixed_point.h" + +#ifdef __cplusplus +extern "C" { +#endif +int DivInt8(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int64_t real_dst_count, DivQuantArg *para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_DIV_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/fixed_point.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/fixed_point.c similarity index 100% rename from mindspore/lite/nnacl/int8/fixed_point.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/fixed_point.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/fixed_point.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/fixed_point.h new file mode 100644 index 0000000000..86c2a1be53 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/fixed_point.h @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_QUANTIZATION_FIXED_POINT_H_ +#define MINDSPORE_NNACL_QUANTIZATION_FIXED_POINT_H_ + +#include +#include +#ifdef ENABLE_NEON +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// returns the high-32 bits of a * b with rounding +// assume that a and b is divided by 2^31, who fall into [-1, 1] +// so the mantissa of a * b is (a / 2^31) * (b / 2^31) * 2^31= (a * b) / 2^31 +// actually we compute 2 * a * b / 2^32 +// and take 32 bits of mantissa for rounding +int SaturatingRoundingDoublingHighMul(int a, int b); + +int16_t SaturatingRoundingDoublingHighMulInt16(int16_t a, int16_t b); + +// division by a 2^exponent with rounding +// or arithmetic right shift with rounding +int RoundingDivideByPOT(int x, int exponent); + +int UpwardRounding(int x, int exponent); + +int MultiplyByQuantizedMultiplier(int32_t value, int32_t multiplier, int32_t left_shift, int32_t right_shift); + +int MultiplyByQuantizedMultiplierWithUpwardRounding(int32_t value, int32_t multiplier, int32_t left_shift, + int32_t right_shift); + +int MultiplyByMultiplierAndRightShift(int32_t value, int32_t multiplier, int32_t right_shift); + +int SaturatingRoundingMultiplyByPOT(int32_t x, int exponent); + +int32_t Rescale(int x, int kIntegerBitsSrc, int kIntegerBitsDst); + +int CountLeadingSignBits(int32_t x); + +int32_t ComputerReciprocal(int32_t x, int x_digits, int *recip_shift); + +int exp_on_negative_values(int a, const int tIntegerBits); + +void GetSqrtQuantMultiplierExp(int32_t input, int reverse_shift, int32_t *multiplier, int32_t *shift); + +#ifdef __cplusplus +} +#endif + +#ifdef ENABLE_NEON +int32x4_t RoundingDivideByPOTInt32x4(int32x4_t x, int exponent); + +int32x4_t SaturatingRoundingDoublingHighMulInt32x4(int32x4_t a, int32x4_t b); +#endif + +#endif // MINDSPORE_NNACL_QUANTIZATION_FIXED_POINT_H_ diff --git 
a/mindspore/lite/nnacl/int8/gatherNd_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/gatherNd_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/gatherNd_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/gatherNd_int8.c diff --git a/mindspore/lite/nnacl/int8/gatherNd_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/gatherNd_int8.h similarity index 100% rename from mindspore/lite/nnacl/int8/gatherNd_int8.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/gatherNd_int8.h diff --git a/mindspore/lite/nnacl/int8/gather_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/gather_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/gather_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/gather_int8.c diff --git a/mindspore/lite/nnacl/int8/gather_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/gather_int8.h similarity index 100% rename from mindspore/lite/nnacl/int8/gather_int8.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/gather_int8.h diff --git a/mindspore/lite/nnacl/int8/hswish_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/hswish_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/hswish_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/hswish_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/hswish_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/hswish_int8.h new file mode 100644 index 0000000000..525c8e4df7 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/hswish_int8.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_INT8_HSWISH_INT8_H_ +#define MINDSPORE_NNACL_INT8_HSWISH_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/fixed_point.h" + +typedef struct HswishQuantArg { + double input_scale; + int32_t input_zp; + double output_scale; + int32_t output_zp; + int16_t relu6_multiplier_fixedpoint_int16; + int32_t relu6_multiplier_exponent; + int16_t output_multiplier_fixedpoint_int16; + int32_t output_multiplier_exponent; +} HswishQuantArg; + +#ifdef __cplusplus +extern "C" { +#endif +int HSwishInt8(const int8_t *src, int length, int8_t *dst, HswishQuantArg *arg); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_HSWISH_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/l2_norm_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/l2_norm_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/l2_norm_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/l2_norm_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/l2_norm_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/l2_norm_int8.h new file mode 100644 index 0000000000..28788a9a01 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/l2_norm_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_INT8_L2_NORM_INT8_H_ +#define MINDSPORE_NNACL_INT8_L2_NORM_INT8_H_ + +#include "nnacl/l2_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int L2NormalizationInt8(const int8_t *input_data, int8_t *output_data, const L2NormParameter *param, + const L2NormQuantArg *quant_param, const int begin, const int end); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_L2_NORM_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/layer_norm_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/layer_norm_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/layer_norm_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/layer_norm_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/layer_norm_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/layer_norm_int8.h new file mode 100644 index 0000000000..d91faec657 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/layer_norm_int8.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_INT8_LAYER_NORM_H_ +#define MINDSPORE_NNACL_INT8_LAYER_NORM_H_ + +#include "nnacl/errorcode.h" +#include "nnacl/layer_norm_parameter.h" +#include "nnacl/int8/fixed_point.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LayerNormInt8(const int8_t *src_data, const float *gamma_data, const float *beta_data, int8_t *dst_data, + LayerNormParameter *param, LayerNormQuantArg *quant, int task_id); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_LAYER_NORM_H_ diff --git a/mindspore/lite/nnacl/int8/leaky_relu_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/leaky_relu_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/leaky_relu_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/leaky_relu_int8.c diff --git a/mindspore/lite/nnacl/int8/leaky_relu_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/leaky_relu_int8.h similarity index 100% rename from mindspore/lite/nnacl/int8/leaky_relu_int8.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/leaky_relu_int8.h diff --git a/mindspore/lite/nnacl/int8/matmul_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/matmul_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/matmul_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/matmul_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/matmul_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/matmul_int8.h new file mode 100644 index 0000000000..6efdca067d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/matmul_int8.h @@ -0,0 +1,84 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_MATMUL_H_ +#define MINDSPORE_NNACL_INT8_MATMUL_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/matmul_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +/* 4x16 16x4 -> 4x4 */ +/* matmul */ +void MatMulInt8_16x4(const int8_t *a, const int8_t *b, int *dst, int row_4, int col_4, int deep_16, + const int *input_sum, const int *bias); +void RowMajor2Row16x4MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col); +void RowMajor2Col16x4MajorInt8(int8_t *src, int row, int col, int8_t *dst); +void CalcInputSums(int8_t *input, int row, int col, int weight_zp, int *dst, DataOrder order); +void CalcWeightBiasSums(int8_t *weight, int row, int col, int input_zp, int *weight_zp_ptr, const int *bias, int *dst, + DataOrder order, bool filter_per_channel); +void MatmulInt8Opt(const int8_t *a, const int8_t *b, int8_t *dst, int row, int col, int deep16, const int *a_sums, + const int *bias, int act_min, int act_max, int out_zp, int32_t *multiplier, int32_t *left_shift, + int32_t *right_shift, size_t stride, size_t filter_peroc, int32_t *filter_zp); + +/* 8x4 4x8 -> 8x8 */ +/* optimize conv */ +void RowMajor2Row8x4MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col); +void MatMulInt8_8x8_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi, + size_t per_channel); + +/* 4x16 16x2 -> 4x2 
*/ +/* arm32 conv1x1 */ +void RowMajor2Row2x16MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col); +void RowMajor2Col16x2MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col); +void MatMulInt8_4x2_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_16, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi, + bool peroc); + +/* 4x4 4x16 -> 4x16 */ +/* optimize conv1x1 */ +void RowMajor2Row4x16MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col); +void PackInput4x4AndInputSumPert(const int8_t *src_input, int8_t *packed_input, int32_t *input_sum, + size_t input_channel, size_t plane_size, int32_t filter_zp); +void MatMulInt8_4x16_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi, + size_t per_channel, int32_t *filter_zp); + +#ifdef ENABLE_ARM64 +void MatmulInt8Neon64(const int8_t *a, const int8_t *b, int8_t *dst, int row4, int col4, int deep16, const int *a_sums, + const int *bias, int act_min, int act_max, int out_zp, int32_t *multiplier, int32_t *left_shift, + int32_t *right_shift, int row, int col, int stride, int filter_peroc); + +void MatMulR4Int8Neon64(const int8_t *a, const int8_t *b, int32_t *dst, int row4, int col4, int deep16, + const int *input_sum, const int *bias); +#endif +#ifdef ENABLE_ARM32 +void MatmulInt8Neon32(const int8_t *a, const int8_t *b, int8_t *dst, int row, int col, int deep16, + const int *input_sums, const int *weight_bias, int act_min, int act_max, int out_zp, + int *multiplier, int *left_shift, int *right_shift, int stride, int per_channel); +#endif +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_MATMUL_H_
diff --git a/mindspore/lite/nnacl/int8/mul_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/mul_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/mul_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/mul_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/mul_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/mul_int8.h new file mode 100644 index 0000000000..af074cba07 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/mul_int8.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_MUL_INT8_H_ +#define MINDSPORE_NNACL_INT8_MUL_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/mul_parameter.h" +#include "nnacl/int8/common_func_int8.h" +#include "nnacl/int8/fixed_point.h" +#ifdef ENABLE_NEON +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif +void Mul(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int64_t real_dst_count, MulQuantArg para); +void FastMul(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int depth, int64_t real_dst_count, + bool input1_broad, MulQuantArg para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_MUL_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/pack_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pack_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/pack_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pack_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pack_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pack_int8.h new file mode 100644 index 0000000000..dcfb2341c0 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pack_int8.h @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_PACK_INT8_H_ +#define MINDSPORE_NNACL_INT8_PACK_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void PackNHWCToNHWC4Int8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWC4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToNHWC8Int8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWC8ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); +void PackNCHWToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel); +void PackNC4HW4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToC8HWN8Int8(const void *src, void *dst, int batch, int plane, int channel); +void PackNCHWToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); +void PackNHWCToNCHWInt8(const void *src, void *dst, int batch, int plane, int channel); + +void PackInputSum16x4Int8(const int8_t *input, int32_t *input_sum, int32_t *filter_zp, ConvParameter *conv_param); +void PackInputSum16x4PerLayer(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16); +void PackInputToC8Int8(const int8_t *input_data, int16_t *packed_input, ConvParameter *conv_param); +void PackWeightToC8Int8(const int8_t *origin_weight_data, int16_t *packed_weight_data, ConvParameter *conv_param); +void Im2ColPackUnitInt8Opt(const int8_t *input_data, int8_t *packed_input, int8_t *matmul_input, int real_cal_num, + int block_index, int32_t *filter_zp, int32_t *input_sum, ConvParameter *conv_param, + bool per_channel, bool is_optimize); +#ifdef ENABLE_ARM +void PreSum4x16Int8Pert(const int8_t *src, int32_t *sum, size_t row4, size_t col16, int32_t filter_zp); +void PreSum4x16Int8Peroc(const int8_t *src, int32_t *sum, int32_t *zp, size_t hw4, size_t ic16, int32_t oc_div, + size_t 
 oc_res, size_t stride); +#endif + +void PackDepthwiseInt8Input(const int8_t *src, int16_t *dst, const ConvParameter *conv_param); +void PackDepthwiseInt8Weight(const int8_t *origin_weight, int16_t *packed_weight_, int plane, int channel, + ConvQuantArg *quant_qrg); +void PackDeconvDepthwiseInt8Weight(const int8_t *origin_weight, int16_t *packed_weight_, int plane, int channel, + ConvQuantArg *quant_qrg); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_PACK_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/pad_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pad_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/pad_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pad_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pad_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pad_int8.h new file mode 100644 index 0000000000..b5d6433339 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pad_int8.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_PAD_INT8_H_ +#define MINDSPORE_NNACL_INT8_PAD_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/pad_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims, + const int32_t *paddings, const int tid, const int thread_num); +void MirrorPadInt8(const int8_t *input_data, int8_t *output_data, const int *input_shape, const PadParameter *pad_param, + int begin, int end); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_PAD_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/pooling_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pooling_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/pooling_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pooling_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pooling_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pooling_int8.h new file mode 100644 index 0000000000..8d7a3b831b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pooling_int8.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_POOLING_H_ +#define MINDSPORE_NNACL_INT8_POOLING_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "nnacl/op_base.h" +#include "nnacl/fp32/pooling_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif +#define MAX_MAXPOOL_SIZE 256 + +int AvgPoolingInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); + +int AvgPoolingOptInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); + +void MaxPoolingInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); + +void MaxPoolingWithQuantInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); + +void MaxPoolingOptInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_POOLING_H_ diff --git a/mindspore/lite/nnacl/int8/power_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/power_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/power_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/power_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/power_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/power_int8.h new file mode 100644 index 0000000000..3e3c3b7d69 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/power_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_POWER_INT8_H_ +#define MINDSPORE_NNACL_INT8_POWER_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/power_parameter.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int PowerInt8(const int8_t *input_ptr, int8_t *exp_ptr, int8_t *output_ptr, int count, PowerParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_POWER_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/quant_dtype_cast_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quant_dtype_cast_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/quant_dtype_cast_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quant_dtype_cast_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quant_dtype_cast_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quant_dtype_cast_int8.h new file mode 100644 index 0000000000..f1a76ac000 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quant_dtype_cast_int8.h @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_QUANTDTYPECAST_H_ +#define MINDSPORE_NNACL_INT8_QUANTDTYPECAST_H_ + +#include "nnacl/op_base.h" + +typedef struct QuantDTypeCastParameter { + OpParameter op_parameter_; + int32_t srcT; + int32_t dstT; +} QuantDTypeCastParameter; + +#ifdef __cplusplus +extern "C" { +#endif +int DoDequantizeInt8ToFp32(const int8_t *quant_values, float *real_values, float scale, int32_t zp, int size); +int DoQuantizeFp32ToInt8(const float *real_values, int8_t *quant_values, float scale, int32_t zp, int size, + bool uint8_flag); +int DoDequantizeUInt8ToFp32(const uint8_t *quant_values, float *real_values, float scale, int32_t zp, int size); +int DoQuantizeFp32ToUInt8(const float *real_values, uint8_t *quant_values, float scale, int32_t zp, int size); +int Int8ToUInt8(const int8_t *quant_values, uint8_t *real_values, int size); +int UInt8ToInt8(const uint8_t *real_values, int8_t *quant_values, int size); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_QUANTDTYPECAST_H_ diff --git a/mindspore/lite/nnacl/int8/quantize.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quantize.c similarity index 100% rename from mindspore/lite/nnacl/int8/quantize.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quantize.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quantize.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quantize.h new file mode 100644 index 0000000000..590c036248 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/quantize.h @@ -0,0 
+1,219 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_QUANTIZATION_QUANTIZE_H_ +#define MINDSPORE_NNACL_QUANTIZATION_QUANTIZE_H_ + +#include +#include +#include "nnacl/op_base.h" + +#define INPUT_PER_CHANNEL 0b001 +#define FILTER_PER_CHANNEL 0b010 +#define OUTPUT_PER_CHANNEL 0b100 + +typedef struct ConvQuantArg { + RoundingMode round_mode_; + CalFixedMultiplierMode quant_multiplier_mode_; + QuantArg *input_quant_args_; + QuantArg *filter_quant_args_; + QuantArg *output_quant_args_; + double *real_multiplier_; + int32_t *left_shift_; + int32_t *right_shift_; + int32_t *quant_multiplier_; + int32_t *out_act_min_; + int32_t *out_act_max_; + size_t input_arg_num_; + size_t filter_arg_num_; + size_t output_arg_num_; + uint8_t per_channel_; +} ConvQuantArg; + +typedef struct ConcatQuantArg { + QuantArg *in_args_; + QuantArg out_args_; + int8_t output_activation_min_; + int8_t output_activation_max_; +} ConcatQuantArg; + +typedef struct PreluQuantArg { + int *input_sizes_; + int output_size_; + int **input_shapes_; + int *output_shape_; + size_t input_num_; + size_t output_dim_; + float alpha_; + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; + QuantArg *in_quant_args_; + QuantArg out_quant_args_; +} PreluQuantArg; + +typedef struct CropQuantArg { + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + 
int output_activation_max_; +} CropQuantArg; + +typedef struct ArithSelfQuantArg { + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; + int output_multiplier_; + int shift_left_; + int shift_right_; +} ArithSelfQuantArg; + +typedef struct GatherQuantArg { + double alpha_; + int zp_in_; + int zp_out_; +} GatherQuantArg; + +typedef struct SoftmaxQuantArg { + QuantArg in_quant_args_; + QuantArg out_quant_arg_; + int output_activation_min_; + int output_activation_max_; + int output_multiplier_; + int shift_left_; + int shift_right_; +} SoftmaxQuantArg; + +typedef struct SubQuantArg { + QuantArg in0_args_; + QuantArg in1_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; + int input0_multiplier_; + int input1_multiplier_; + int output_multiplier_; + int input0_shift_; + int input1_shift_; + int output_shift_; + int left_shift_result0_; + int left_shift_result1_; + int right_shift0_; + int right_shift1_; + int left_shift_out_; + int right_shift_out_; +} SubQuantArg; + +typedef struct ArithmeticQuantArg { + QuantArg in0_args_; + QuantArg in1_args_; + QuantArg out_args_; +} ArithmeticQuantArg; + +typedef struct DivQuantArg { + QuantArg in0_args_; + QuantArg in1_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; + int output_multiplier_; + int output_shift_; +} DivQuantArg; + +typedef struct ReduceQuantArg { + double in_scale_; + int32_t in_zp_; + double out_scale_; + int32_t out_zp_; + int32_t in_out_multiplier_; + int in_out_left_shift_; + int in_out_right_shift_; + int32_t mean_multiplier_; + int mean_left_shift_; + int mean_right_shift_; + int32_t prod_multiplier_; + int prod_left_shift_; + int prod_right_shift_; + int32_t sum_square_multiplier_; + int sum_square_left_shift_; + int sum_square_right_shift_; +} ReduceQuantArg; + +typedef struct LeakyReluQuantArg { + OpParameter op_parameter_; + PreluQuantArg quant_arg; + float slope_; + int64_t 
axis_; + int *in_shape_; + int *out_shape_; + int input_dim_; + int element_num; +} LeakyReluQuantArg; + +typedef struct ResizeQuantArg { + int32_t ratio_x_; + int32_t ratio_y_; + int32_t *x_axis_index_; + int32_t *x_axis_lower_; + int32_t *x_axis_upper_; + int32_t *y_axis_index_; + int32_t *y_axis_lower_; + int32_t *y_axis_upper_; +} ResizeQuantArg; + +typedef struct ResizeFloatScaleQuantArg { + float ratio_x_; + float ratio_y_; + float *x_axis_index_; + int32_t *x_axis_lower_; + int32_t *x_axis_upper_; + float *y_axis_index_; + int32_t *y_axis_lower_; + int32_t *y_axis_upper_; +} ResizeFloatScaleQuantArg; + +#ifdef __cplusplus +extern "C" { +#endif + +void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift); + +void QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantized_multiplier, int *right_shift); + +void QuantizeRoundParameterWithDoublePrecision(double double_multiplier, int32_t *quantized_multiplier, int *left_shift, + int *right_shift); + +void QuantizeRoundParameterWithSinglePrecision(double double_multiplier, int32_t *quantized_multiplier, int *left_shift, + int *right_shift); + +uint8_t QuantizeToUint8(float real_value, float scale, int32_t zp); + +int32_t QuantizeToInt8(float real_value, float scale, int32_t zp); + +void CalculateActivationRangeQuantized(bool is_relu, bool is_relu6, int32_t zp, float scale, int *mini, int *maxi); +// quantize from float to int8 +void Quantize(const float *input_data, int length, float scale, int zero_point, int8_t *output_data); + +// dequantize from int8 to float +void Dequantize(int8_t *input_data, int length, float scale, int zero_point, float *output_data); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_QUANTIZATION_QUANTIZE_H_ diff --git a/mindspore/lite/nnacl/int8/reduce_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reduce_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/reduce_int8.c rename to 
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reduce_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reduce_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reduce_int8.h new file mode 100644 index 0000000000..99a5dfee13 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reduce_int8.h @@ -0,0 +1,70 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_REDUCE_INT8_H_ +#define MINDSPORE_NNACL_INT8_REDUCE_INT8_H_ + +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ReduceMeanN(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanH(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanW(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNH(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNW(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanHW(int n, int plane, int count, int c, int8_t *in_data, int8_t *out_data, 
QuantMulArg quant_arg, + int32_t bias); +int ReduceMeanHC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNHW(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNHC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanHWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); +int ReduceMeanNHWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); + +int ReduceMeanInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceMeanLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceSumInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceSumLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceMaxInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceMaxLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int 
ReduceMinInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceMinLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceProdLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceProdInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceSumSquareLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +int ReduceSumSquareInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, + int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INT8_REDUCE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/relux_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/relux_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/relux_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/relux_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/relux_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/relux_int8.h new file mode 100644 index 0000000000..591f53a94d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/relux_int8.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not 
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_INT8_RELU_INT8_H_ +#define MINDSPORE_NNACL_INT8_RELU_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/errorcode.h" +#include "nnacl/int8/fixed_point.h" +#include "nnacl/int8/quantize.h" + +typedef struct ReluXQuantArg { + QuantArg input_arg; + QuantArg output_arg; + int input_multiplier_; + int left_shift_; + int right_shift_; + int quantized_output_min; + int quantized_output_max; +} ReluXQuantArg; + +#ifdef __cplusplus +extern "C" { +#endif +void ReluXInt8(const int8_t *src, int length, int8_t *dst, ReluXQuantArg *arg); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_RELU_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/reshape_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reshape_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/reshape_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reshape_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reshape_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reshape_int8.h new file mode 100644 index 0000000000..cfe2045a79 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/reshape_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_RESHAHPE_INT8_H_ +#define MINDSPORE_NNACL_INT8_RESHAHPE_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/reshape_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +void Int8Reshape(int8_t *input_ptr, int8_t *output_ptr, int64_t real_dst_count, ReshapeQuantArg para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_RESHAHPE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/resize_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/resize_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/resize_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/resize_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/resize_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/resize_int8.h new file mode 100644 index 0000000000..54189c3b2e --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/resize_int8.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_INT8_RESIZE_H_ +#define MINDSPORE_NNACL_INT8_RESIZE_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/resize_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int ResizeBilinearInt8(const int8_t *input_ptr, int8_t *output_ptr, int batch, int in_h, int in_w, int out_h, int out_w, + int channel, int index, int count, ResizeQuantArg quant_arg); + +int ResizeBilinearWithFloatScaleInt8(const int8_t *input_ptr, int8_t *output_ptr, int batch, int in_h, int in_w, + int out_h, int out_w, int channel, int index, int count, + ResizeFloatScaleQuantArg quant_arg); + +int ResizeNearestNeighborInt8Simple(const int8_t *input_data, int8_t *output_data, const int *input_shape, + const int *output_shape, const bool align_corners, int tid, int thread_num); + +int ResizeNearestNeighborInt8(const int8_t *input_data, int8_t *output_data, const int *input_shape, + const int *output_shape, const bool align_corners, const QuantMulArg *multiplier, + QuantArg *quant_in, QuantArg *quant_out, int tid, int thread_num); + +void ComputeNearestNeighborInt(const int32_t pos, const int in_size, const int32_t new_size, const bool align_corners, + int32_t *nearest); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_RESIZE_H_ diff --git a/mindspore/lite/nnacl/int8/scale_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/scale_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/scale_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/scale_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/scale_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/scale_int8.h new file mode 100644 index 0000000000..61876cf968 --- /dev/null +++ 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/scale_int8.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_SCALE_INT8_H_ +#define MINDSPORE_NNACL_SCALE_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/scale.h" +#include "nnacl/nnacl_common.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DoScaleInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, const ScaleParameter *scale_param, + int real_dst_count); +void DoScaleWithBiasInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, const int8_t *offset, + const ScaleParameter *scale_param, int real_dst_count); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_SCALE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/sigmoid_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sigmoid_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/sigmoid_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sigmoid_int8.c diff --git a/mindspore/lite/nnacl/int8/sigmoid_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sigmoid_int8.h similarity index 100% rename from mindspore/lite/nnacl/int8/sigmoid_int8.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sigmoid_int8.h diff --git a/mindspore/lite/nnacl/int8/slice_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/slice_int8.c similarity 
index 100% rename from mindspore/lite/nnacl/int8/slice_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/slice_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/slice_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/slice_int8.h new file mode 100644 index 0000000000..b1b38923a0 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/slice_int8.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INT8_SLICE_INT8_H_ +#define MINDSPORE_NNACL_INT8_SLICE_INT8_H_ + +#include +#include +#include "nnacl/op_base.h" +#include "nnacl/slice_parameter.h" +#include "nnacl/int8/fixed_point.h" + +#ifdef __cplusplus +extern "C" { +#endif +int SliceInt8NoParallel(const int8_t *input, int8_t *output, SliceParameter *param); +int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param, int thread_id); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_SLICE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/softmax_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/softmax_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/softmax_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/softmax_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/softmax_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/softmax_int8.h new file mode 100644 index 0000000000..dfe2213669 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/softmax_int8.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_SOFTMAX_INT8_H_ +#define MINDSPORE_NNACL_INT8_SOFTMAX_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/softmax_parameter.h" +#include "nnacl/int8/fixed_point.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int SoftmaxInt8(const int8_t *input_ptr, int8_t *output_ptr, int count, int *exp_data, int *sum_data, + SoftmaxQuantArg quant_param, SoftmaxParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_SOFTMAX_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/space_to_batch_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/space_to_batch_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/space_to_batch_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/space_to_batch_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/space_to_batch_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/space_to_batch_int8.h new file mode 100644 index 0000000000..fb035e21f5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/space_to_batch_int8.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INT8_SPACE_TO_BATCH_INT8_H_ +#define MINDSPORE_NNACL_INT8_SPACE_TO_BATCH_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/fp32/space_to_batch_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif +void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, const int *in_shape, + const int *out_shape); +void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, SpaceToBatchParameter *param, int32_t zp); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_SPACE_TO_BATCH_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/splice_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/splice_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/splice_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/splice_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/splice_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/splice_int8.h new file mode 100644 index 0000000000..42bc0e5a64 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/splice_int8.h @@ -0,0 +1,30 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_INT8_SPLICE_INT8_H_ +#define MINDSPORE_NNACL_INT8_SPLICE_INT8_H_ +#include +#include "nnacl/splice_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif + +void SpliceInt8(const int8_t *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter, + int8_t *dst_data, int dst_row, int dst_col); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_INT8_SPLICE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/split_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/split_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/split_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/split_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/split_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/split_int8.h new file mode 100644 index 0000000000..8933777e34 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/split_int8.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_SPLIT_INT8_H_ +#define MINDSPORE_NNACL_INT8_SPLIT_INT8_H_ + +#include +#include "nnacl/op_base.h" +#include "nnacl/split_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif +int Int8DoSplit(int8_t *in_data, int8_t **out_data, const int *input_shape, int offset, int num_unit, + SplitParameter *split_param); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_SPLIT_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/squeeze_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/squeeze_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/squeeze_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/squeeze_int8.c diff --git a/mindspore/lite/nnacl/int8/squeeze_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/squeeze_int8.h similarity index 100% rename from mindspore/lite/nnacl/int8/squeeze_int8.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/squeeze_int8.h diff --git a/mindspore/lite/nnacl/int8/sub_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sub_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/sub_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sub_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sub_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sub_int8.h new file mode 100644 index 0000000000..0095324b79 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/sub_int8.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_SUB_INT8_H_ +#define MINDSPORE_NNACL_INT8_SUB_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#ifdef __cplusplus +extern "C" { +#endif +int SubInt8(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int64_t real_dst_count, SubQuantArg *para); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_SUB_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/tanh_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/tanh_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/tanh_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/tanh_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/tanh_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/tanh_int8.h new file mode 100644 index 0000000000..428a1bd805 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/tanh_int8.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INT8_TANH_INT8_H_ +#define MINDSPORE_NNACL_INT8_TANH_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" +#include "nnacl/int8/fixed_point.h" +#include "nnacl/int8/quant_dtype_cast_int8.h" +#include "nnacl/fp32/activation_fp32.h" + +typedef struct TanhQuantParameter { + int32_t in_zp_; + int32_t out_zp_; + double in_scale_; + double out_scale_; +} TanhQuantParameter; + +#ifdef __cplusplus +extern "C" { +#endif + +void TanhInt8(const int8_t *input_ptr, int8_t *output_ptr, int size, TanhQuantParameter *quant); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_TANH_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/topk_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/topk_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/topk_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/topk_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/topk_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/topk_int8.h new file mode 100644 index 0000000000..1fe82f2eca --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/topk_int8.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_TOPK_INT8_H_ +#define MINDSPORE_NNACL_INT8_TOPK_INT8_H_ + +#include "nnacl/op_base.h" +#include "nnacl/fp32/topk_fp32.h" + +typedef struct TopkNodeInt8 { + int8_t element; + int32_t index; +} TopkNodeInt8; + +#ifdef __cplusplus +extern "C" { +#endif +void TopkInt8(int8_t *input_data, int8_t *output_data, int32_t *output_index, TopkParameter *parameter); +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_TOPK_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/transpose_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/transpose_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/transpose_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/transpose_int8.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/transpose_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/transpose_int8.h new file mode 100644 index 0000000000..7f842482f7 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/transpose_int8.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INT8_TRANSPOSE_INT8_H_ +#define MINDSPORE_NNACL_INT8_TRANSPOSE_INT8_H_ + +#include +#include "nnacl/transpose.h" +#include "nnacl/errorcode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DoTransposeInt8(const int8_t *in_data, int8_t *out_data, const int *output_shape, + TransposeParameter *transpose_param, int h_start, int h_end, int *dim_size, int *position); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INT8_TRANSPOSE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/unsqueeze_int8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/unsqueeze_int8.c similarity index 100% rename from mindspore/lite/nnacl/int8/unsqueeze_int8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/unsqueeze_int8.c diff --git a/mindspore/lite/nnacl/int8/unsqueeze_int8.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/unsqueeze_int8.h similarity index 100% rename from mindspore/lite/nnacl/int8/unsqueeze_int8.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/unsqueeze_int8.h diff --git a/mindspore/lite/nnacl/intrinsics/avx/common_utils.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/avx/common_utils.c similarity index 100% rename from mindspore/lite/nnacl/intrinsics/avx/common_utils.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/avx/common_utils.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/avx/common_utils.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/avx/common_utils.h new file mode 100644 index 0000000000..f53de26dac --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/avx/common_utils.h @@ -0,0 +1,44 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_NNACL_X86_64_AVX_COMMON_UTILS_H_ +#define MINDSPORE_NNACL_X86_64_AVX_COMMON_UTILS_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif +#ifdef __GNUC__ +#if __GNUC__ < 8 +#define _mm256_set_m128i(xmm1, xmm2) \ + _mm256_permute2f128_si256(_mm256_castsi128_si256(xmm1), _mm256_castsi128_si256(xmm2), 2) +#define _mm256_set_m128f(xmm1, xmm2) \ + _mm256_permute2f128_ps(_mm256_castps128_ps256(xmm1), _mm256_castps128_ps256(xmm2), 2) +#endif +#endif + +// Signed saturating Add +__m128i _mm_adds_epi32(__m128i a, __m128i b); + +// Signed rounding shift right +__m128i _mm_rshr_epi32(__m128i a, int shift); + +// Signed saturating Rounding Doubling Multiply return High half +__m128i _mm_qrdmulh_epi32(__m128i a, __m128i b); +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_X86_64_AVX_COMMON_UTILS_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/ms_simd_instructions.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/ms_simd_instructions.h new file mode 100644 index 0000000000..4b46d798d7 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/ms_simd_instructions.h @@ -0,0 +1,229 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_INTRINSICS_MS_SIMD_INSTRUCTIONS_H_ +#define MINDSPORE_NNACL_INTRINSICS_MS_SIMD_INSTRUCTIONS_H_ +#include +#ifdef ENABLE_ARM +#include +#endif +#if defined(ENABLE_SSE) || defined(ENABLE_AVX) +#include +#endif + +#ifdef ENABLE_ARM +#define MS_FLOAT32X4 float32x4_t +#define MS_INT32X4 int32x4_t +#define MS_LDQ_F32 vld1q_f32 +#define MS_LDQ_EPI32 vld1q_s32 +#define MS_ADDQ_F32 vaddq_f32 +#define MS_ADDQ_EPI32 vaddq_s32 +#define MS_MOVQ_F32 vmovq_n_f32 +#define MS_MOVQ_EPI32 vmovq_n_s32 +#define MS_SUBQ_F32 vsubq_f32 +#define MS_MLAQ_F32(src1, src2, src3) vmlaq_f32(src1, src2, src3) +#define MS_STQ_F32 vst1q_f32 +#define MS_STQ_EPI32 vst1q_s32 +#define MS_MAXQ_F32 vmaxq_f32 +#define MS_MAXQ_EPI32 vmaxq_s32 +#define MS_MINQ_F32 vminq_f32 +#define MS_MINQ_EPI32 vminq_s32 +#define MS_MULQ_F32(src1, src2) vmulq_f32(src1, src2) +#define MS_MULQ_EPI32(src1, src2) vmulq_s32(src1, src2) +#ifdef ENABLE_ARM64 +#define MS_DIVQ_F32(src1, src2) vdivq_f32(src1, src2) +#else +inline static float32x4_t vrecp(float32x4_t v) { + float32x4_t r = vrecpeq_f32(v); + r = vmulq_f32(vrecpsq_f32(v, r), r); + r = vmulq_f32(vrecpsq_f32(v, r), r); + return r; +} +#define MS_DIVQ_F32(src1, src2) vmulq_f32(src1, vrecp(src2)) +#endif +#define MS_MULQ_N_F32(src1, src2) vmulq_n_f32(src1, src2) +#define MS_MULQ_N_EPI32(src1, src2) vmulq_n_s32(src1, src2) +#define MS_DIVQ_N_F32(src1, src2) vdivq_n_f32(src1, src2) +#define MS_SLLIQ_EPI32(src1, src2) vshlq_s32(src1, vmovq_n_s32(src2)) +#define MS_CVTQPS_EPI32(src) vcvtq_s32_f32(src) +#define 
MS_CVTQEPI32_PS(src) vcvtq_f32_s32(src) +#define MS_CMPGTQ_F32(src1, src2) vcgtq_f32(src1, src2) +#define MS_CMPGTQ_EPI32(src1, src2) vcgtq_s32(src1, src2) +// Note: Compared with X86, the vbslq_f32 parameters are the opposite with _mm_blendv_f32 +#define MS_BLENDQ_F32(src1, src2, src3) vbslq_f32(src3, src2, src1) +#define MS_BLENDQ_EPI32(src1, src2, src3) vbslq_s32(src3, src2, src1) +#define MS_CAST_F32_S32(src) vreinterpretq_f32_s32(src) +#endif + +#if defined(ENABLE_AVX) +#define MS_FLOAT32X8 __m256 +#define MS_INT32X8 __m256i +#define MS_LD256_F32 _mm256_loadu_ps +#define MS_LD256_EPI32(src) _mm256_loadu_si256((__m256i const *)(src)) +#define MS_ADD256_F32 _mm256_add_ps +#define MS_ADD256_EPI32 _mm256_add_epi32 +#define MS_MOV256_F32 _mm256_set1_ps +#define MS_MOV256_EPI32 _mm256_set1_epi32 +#define MS_MLA256_F32(src1, src2, src3) _mm256_add_ps(src1, _mm256_mul_ps(src2, src3)) +#define MS_ST256_F32 _mm256_storeu_ps +#define MS_ST256_EPI32(src1, src2) _mm256_storeu_si256((__m256i *)(src1), src2) +#define MS_SUB256_F32 _mm256_sub_ps +#define MS_MAX256_F32 _mm256_max_ps +#define MS_MAX256_EPI32 _mm256_max_epi32 +#define MS_MIN256_F32 _mm256_min_ps +#define MS_MIN256_EPI32 _mm256_min_epi32 +#define MS_MUL256_F32(src1, src2) _mm256_mul_ps(src1, src2) +#define MS_MUL256_EPI32(src1, src2) _mm256_mul_epi32(src1, src2) +#define MS_DIV256_F32(src1, src2) _mm256_div_ps(src1, src2) +#define MS_MUL256_N_F32(src1, src2) _mm256_mul_ps(src1, _mm256_set1_ps(src2)) +#define MS_MUL256_N_EPI32(src1, src2) _mm256_mul_epi32(src1, _mm256_set1_epi32(src2)) +#define MS_DIV256_N_F32(src1, src2) _mm256_div_ps(src1, _mm256_set1_ps(src2)) +#define MS_SLLI256_EPI32(src1, src2) _mm256_slli_epi32(src1, src2) +#define MS_CVT256PS_EPI32(src) _mm256_cvttps_epi32(src) +#define MS_CVT256EPI32_PS(src) _mm256_cvtepi32_ps(src) // truncate float to int +#define MS_CMP256_F32(src1, src2, src3) _mm256_cmp_ps(src1, src2, src3) +#define MS_CMPGT256_EPI32(src1, src2) _mm256_cmpgt_epi32(src1, src2) +#define 
MS_BLEND256_F32(src1, src2, src3) _mm256_blendv_ps(src1, src2, src3) +#define MS_BLEND256_EPI32(src1, src2, src3) _mm256_blendv_epi8(src1, src2, src3) +#define MS_CAST256_F32_S32(src) _mm256_castsi256_ps(src) +#endif + +#if defined(ENABLE_SSE) +#define MS_FLOAT32X4 __m128 +#define MS_INT32X4 __m128i +#define MS_LDQ_F32 _mm_loadu_ps +#define MS_LDQ_EPI32(src) _mm_loadu_si128((__m128i const *)(src)) +#define MS_ADDQ_F32 _mm_add_ps +#define MS_ADDQ_EPI32 _mm_add_epi32 +#define MS_MOVQ_F32 _mm_set1_ps +#define MS_MOVQ_EPI32 _mm_set1_epi32 +#define MS_MLAQ_F32(src1, src2, src3) _mm_add_ps(src1, _mm_mul_ps(src2, src3)) +#define MS_STQ_F32 _mm_storeu_ps +#define MS_STQ_EPI32(src1, src2) _mm_storeu_si128((__m128i *)(src1), src2) +#define MS_SUBQ_F32 _mm_sub_ps +#define MS_MAXQ_F32 _mm_max_ps +#define MS_MAXQ_EPI32 _mm_max_epi32 +#define MS_MINQ_F32 _mm_min_ps +#define MS_MINQ_EPI32 _mm_min_epi32 +#define MS_MULQ_F32(src1, src2) _mm_mul_ps(src1, src2) +#define MS_MULQ_EPI32(src1, src2) _mm_mul_epi32(src1, src2) +#define MS_DIVQ_F32(src1, src2) _mm_div_ps(src1, src2) +#define MS_MULQ_N_F32(src1, src2) _mm_mul_ps(src1, _mm_set1_ps(src2)) +#define MS_MULQ_N_EPI32(src1, src2) _mm_mul_epi32(src1, _mm_set1_epi32(src2)) +#define MS_DIVQ_N_F32(src1, src2) _mm_div_ps(src1, _mm_set1_ps(src2)) +#define MS_SLLIQ_EPI32(src1, src2) _mm_slli_epi32(src1, src2) +#define MS_CVTQPS_EPI32(src) _mm_cvttps_epi32(src) // truncate float to int +#define MS_CVTQEPI32_PS(src) _mm_cvtepi32_ps(src) +#define MS_CMPGTQ_F32(src1, src2) _mm_cmpgt_ps(src1, src2) +#define MS_CMPGTQ_EPI32(src1, src2) _mm_cmpgt_epi32(src1, src2) +#define MS_BLENDQ_F32(src1, src2, src3) _mm_blendv_ps(src1, src2, src3) +#define MS_BLENDQ_EPI32(src1, src2, src3) _mm_blendv_epi8(src1, src2, src3) +#define MS_CAST_F32_S32(src) _mm_castsi128_ps(src) +#endif + +#define LOAD256X8_F32(src, input_ptr, num) \ + MS_FLOAT32X8 src##1 = MS_LD256_F32(input_ptr + 0 * num); \ + MS_FLOAT32X8 src##2 = MS_LD256_F32(input_ptr + 1 * num); \ + 
MS_FLOAT32X8 src##3 = MS_LD256_F32(input_ptr + 2 * num); \ + MS_FLOAT32X8 src##4 = MS_LD256_F32(input_ptr + 3 * num); \ + MS_FLOAT32X8 src##5 = MS_LD256_F32(input_ptr + 4 * num); \ + MS_FLOAT32X8 src##6 = MS_LD256_F32(input_ptr + 5 * num); \ + MS_FLOAT32X8 src##7 = MS_LD256_F32(input_ptr + 6 * num); \ + MS_FLOAT32X8 src##8 = MS_LD256_F32(input_ptr + 7 * num); + +#define STORE256X8_F32(output_ptr, num, dst) \ + MS_ST256_F32(output_ptr + 0 * num, dst##1); \ + MS_ST256_F32(output_ptr + 1 * num, dst##2); \ + MS_ST256_F32(output_ptr + 2 * num, dst##3); \ + MS_ST256_F32(output_ptr + 3 * num, dst##4); \ + MS_ST256_F32(output_ptr + 4 * num, dst##5); \ + MS_ST256_F32(output_ptr + 5 * num, dst##6); \ + MS_ST256_F32(output_ptr + 6 * num, dst##7); \ + MS_ST256_F32(output_ptr + 7 * num, dst##8); + +#define LOAD128X8_F32(src, input_ptr, num) \ + MS_FLOAT32X4 src##1 = MS_LDQ_F32(input_ptr + 0 * num); \ + MS_FLOAT32X4 src##2 = MS_LDQ_F32(input_ptr + 1 * num); \ + MS_FLOAT32X4 src##3 = MS_LDQ_F32(input_ptr + 2 * num); \ + MS_FLOAT32X4 src##4 = MS_LDQ_F32(input_ptr + 3 * num); \ + MS_FLOAT32X4 src##5 = MS_LDQ_F32(input_ptr + 4 * num); \ + MS_FLOAT32X4 src##6 = MS_LDQ_F32(input_ptr + 5 * num); \ + MS_FLOAT32X4 src##7 = MS_LDQ_F32(input_ptr + 6 * num); \ + MS_FLOAT32X4 src##8 = MS_LDQ_F32(input_ptr + 7 * num); + +#define STORE128X8_F32(output_ptr, num, dst) \ + MS_STQ_F32(output_ptr + 0 * num, dst##1); \ + MS_STQ_F32(output_ptr + 1 * num, dst##2); \ + MS_STQ_F32(output_ptr + 2 * num, dst##3); \ + MS_STQ_F32(output_ptr + 3 * num, dst##4); \ + MS_STQ_F32(output_ptr + 4 * num, dst##5); \ + MS_STQ_F32(output_ptr + 5 * num, dst##6); \ + MS_STQ_F32(output_ptr + 6 * num, dst##7); \ + MS_STQ_F32(output_ptr + 7 * num, dst##8); + +static inline MS_FLOAT32X4 MS_TANHX4_F32(MS_FLOAT32X4 src) { + static const float data[] = {378.0f, 17325.0f, 135135.0f, 28.0f, 3150.0f, 62370.0f}; + static const MS_FLOAT32X4 neg = {-1.0f, -1.0f, -1.0f, -1.0f}; + static const MS_FLOAT32X4 pos = {1.0f, 1.0f, 1.0f, 
1.0f}; + MS_FLOAT32X4 square = src * src; + MS_FLOAT32X4 a = (((square + data[0]) * square + data[1]) * square + data[2]) * src; + MS_FLOAT32X4 b = ((data[3] * square + data[4]) * square + data[5]) * square + data[2]; + return MS_MINQ_F32(MS_MAXQ_F32(a / b, neg), pos); +} + +#ifdef ENABLE_AVX +static inline MS_FLOAT32X8 MS_TANHX8_F32(MS_FLOAT32X8 src) { + static const float data[] = {378.0f, 17325.0f, 135135.0f, 28.0f, 3150.0f, 62370.0f}; + static const MS_FLOAT32X8 neg = {-1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f}; + static const MS_FLOAT32X8 pos = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}; + MS_FLOAT32X8 square = src * src; + MS_FLOAT32X8 a = (((square + data[0]) * square + data[1]) * square + data[2]) * src; + MS_FLOAT32X8 b = ((data[3] * square + data[4]) * square + data[5]) * square + data[2]; + return MS_MIN256_F32(MS_MAX256_F32(a / b, neg), pos); +} +#endif + +static inline MS_FLOAT32X4 MS_ERFX4_F32(MS_FLOAT32X4 src) { + MS_FLOAT32X4 dst; + dst[0] = erff(src[0]); + dst[1] = erff(src[1]); + dst[2] = erff(src[2]); + dst[3] = erff(src[3]); + return dst; +} + +#ifdef ENABLE_ARM64 +static inline float16x8_t MS_TANHX8_F16(float16x8_t src) { + float32x4_t src_low = vcvt_f32_f16(vget_low_f16(src)); + float32x4_t src_high = vcvt_f32_f16(vget_high_f16(src)); + return vcombine_f16(vcvt_f16_f32(MS_TANHX4_F32(src_low)), vcvt_f16_f32(MS_TANHX4_F32(src_high))); +} + +static inline float16x8_t MS_ERFX8_F16(float16x8_t src) { + float16x8_t dst; + dst[0] = erff(src[0]); + dst[1] = erff(src[1]); + dst[2] = erff(src[2]); + dst[3] = erff(src[3]); + dst[4] = erff(src[4]); + dst[5] = erff(src[5]); + dst[6] = erff(src[6]); + dst[7] = erff(src[7]); + return dst; +} +#endif + +#endif // MINDSPORE_NNACL_INTRINSICS_MS_SIMD_INSTRUCTIONS_H_ diff --git a/mindspore/lite/nnacl/intrinsics/sse/ConvDwFp32IndirectRow.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/ConvDwFp32IndirectRow.c similarity index 100% rename from 
mindspore/lite/nnacl/intrinsics/sse/ConvDwFp32IndirectRow.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/ConvDwFp32IndirectRow.c diff --git a/mindspore/lite/nnacl/intrinsics/sse/ConvDwFp32Row_sse.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/ConvDwFp32Row_sse.c similarity index 100% rename from mindspore/lite/nnacl/intrinsics/sse/ConvDwFp32Row_sse.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/ConvDwFp32Row_sse.c diff --git a/mindspore/lite/nnacl/intrinsics/sse/DepthwiseFp32_Sse.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/DepthwiseFp32_Sse.c similarity index 100% rename from mindspore/lite/nnacl/intrinsics/sse/DepthwiseFp32_Sse.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/DepthwiseFp32_Sse.c diff --git a/mindspore/lite/nnacl/intrinsics/sse/MatMul_Sse.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/MatMul_Sse.c similarity index 100% rename from mindspore/lite/nnacl/intrinsics/sse/MatMul_Sse.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/MatMul_Sse.c diff --git a/mindspore/lite/nnacl/intrinsics/sse/PostFuncBiasReluC4.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/PostFuncBiasReluC4.c similarity index 100% rename from mindspore/lite/nnacl/intrinsics/sse/PostFuncBiasReluC4.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/PostFuncBiasReluC4.c diff --git a/mindspore/lite/nnacl/intrinsics/sse/PostFuncBiasReluC8.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/PostFuncBiasReluC8.c similarity index 100% rename from mindspore/lite/nnacl/intrinsics/sse/PostFuncBiasReluC8.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/PostFuncBiasReluC8.c diff --git a/mindspore/lite/nnacl/intrinsics/sse/TiledC4MatMulFp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/TiledC4MatMulFp32.c 
similarity index 100% rename from mindspore/lite/nnacl/intrinsics/sse/TiledC4MatMulFp32.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/TiledC4MatMulFp32.c diff --git a/mindspore/lite/nnacl/intrinsics/sse/WinogradTrans.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/WinogradTrans.c similarity index 100% rename from mindspore/lite/nnacl/intrinsics/sse/WinogradTrans.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/WinogradTrans.c diff --git a/mindspore/lite/nnacl/intrinsics/sse/sse_common.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/sse_common.c similarity index 100% rename from mindspore/lite/nnacl/intrinsics/sse/sse_common.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/sse_common.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/sse_common.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/sse_common.h new file mode 100644 index 0000000000..e726b78510 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/sse/sse_common.h @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_INTRINSICS_SSE_SSE_COMMON_H_ +#define MINDSPORE_NNACL_INTRINSICS_SSE_SSE_COMMON_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +void ActBlock1(__m128 *v1, size_t relu, size_t relu6); +void ActBlock2(__m128 *v1, __m128 *v2, size_t relu, size_t relu6); +void ActBlock4(__m128 *v1, __m128 *v2, __m128 *v3, __m128 *v4, size_t relu, size_t relu6); +void ActBlock8(__m128 *v1, __m128 *v2, __m128 *v3, __m128 *v4, __m128 *v5, __m128 *v6, __m128 *v7, __m128 *v8, + size_t relu_type); + +void WriteCol1(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol2(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int r); +void WriteCol2Opt(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int r); +void WriteCol3(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol4(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol5(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol6(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol7(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); +void WriteCol8(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, 
__m128 *dst4, __m128 *dst5, __m128 *dst6, + __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); + +void DoBiasBlock8(const float *bias_ptr, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, + __m128 *dst6, __m128 *dst7, __m128 *dst8); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_INTRINSICS_SSE_SSE_COMMON_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/l2_norm_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/l2_norm_parameter.h new file mode 100644 index 0000000000..78effda270 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/l2_norm_parameter.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_L2NORM_PARAMETER_H_ +#define MINDSPORE_NNACL_L2NORM_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +typedef struct L2NormParameter { + // Primitive parameter + OpParameter op_parameter_; + float epsilon_; + int axis_[MAX_SHAPE_SIZE]; + // shape correlative + size_t axis_num_; + int data_num_; + int *shape_; + size_t shape_num_; + // other parameter + ActType act_type_; +} L2NormParameter; + +typedef struct { + QuantArg in_; + QuantArg out_; +} L2NormQuantArg; + +#endif // MINDSPORE_NNACL_L2NORM_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/layer_norm_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/layer_norm_parameter.h new file mode 100644 index 0000000000..00cfa5e83f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/layer_norm_parameter.h @@ -0,0 +1,50 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_LAYER_NORM_PARAMETER_H_ +#define MINDSPORE_NNACL_LAYER_NORM_PARAMETER_H_ + +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +enum ElementwiseMode { ELEMENTWISE_NOT = 0, ELEMENTWISE_PER_CHANNEL = 1, ELEMENTWISE_PER_NUM = 2 }; +typedef struct LayerNormParameter { + // Primitive parameter + OpParameter op_parameter_; + float epsilon_; + enum ElementwiseMode elementwise_mode_; + bool elementwise_affine_; + int begin_norm_axis_; + int begin_params_axis_; + // shape correlative + int norm_inner_size_; + int norm_outer_size_; + int params_inner_size_; + int params_outer_size_; + int normalized_dims_; + int normalized_shape_[MAX_SHAPE_SIZE]; + // other parameter + int thread_count_; + int thread_outsize_; +} LayerNormParameter; + +typedef struct LayerNormQuantArg { + int32_t in_zp_; + int32_t out_zp_; + double in_scale_; + double out_scale_; +} LayerNormQuantArg; + +#endif // MINDSPORE_NNACL_LAYER_NORM_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/lsh_projection_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/lsh_projection_parameter.h new file mode 100644 index 0000000000..e52a1cc6d4 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/lsh_projection_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_LSH_PROJECTION_PARAMETER_H_ +#define MINDSPORE_NNACL_LSH_PROJECTION_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct LshProjectionParameter { + // Primitive parameter + OpParameter op_parameter_; + // shape correlative + int hash_shape_[2]; + // other parameter + int lsh_type_; + int feature_num_; + char **hash_buffs_; + size_t hash_buff_size_; + int64_t thread_stride_; +} LshProjectionParameter; + +#endif // MINDSPORE_NNACL_LSH_PROJECTION_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/lstm_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/lstm_parameter.h new file mode 100644 index 0000000000..8c5eb57343 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/lstm_parameter.h @@ -0,0 +1,40 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_LSTM_PARAMETER_H_ +#define MINDSPORE_NNACL_LSTM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct LstmParameter { + // Primitive parameter + OpParameter op_parameter_; + // shape correlative + int input_size_; + int hidden_size_; // output_size + int seq_len_; + int batch_; + // other parameter + int output_step_; + bool bidirectional_; + float zoneout_cell_; + float zoneout_hidden_; + int input_row_align_; + int input_col_align_; + int state_row_align_; + int state_col_align_; +} LstmParameter; + +#endif // MINDSPORE_NNACL_LSTM_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/matmul_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/matmul_parameter.h new file mode 100644 index 0000000000..a513f4608b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/matmul_parameter.h @@ -0,0 +1,79 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_MATMUL_H_ +#define MINDSPORE_NNACL_MATMUL_H_ + +#include "nnacl/op_base.h" + +typedef void (*MATMUL_OPT_R4_FUNC)(const int8_t *a, const int8_t *b, int *dst, int row_4, int col_4, int deep_16, + const int *input_sum, const int *bias); + +typedef void (*MATMUL_OPT_R_FUNC)(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, + int32_t maxi, size_t per_channel); + +typedef void (*MATMUL_OPT_DP_FUNC)(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, + size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, + int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, + int32_t maxi, size_t per_channel, int *filter_zp); + +typedef enum OutType { OutType_C8 = 0, OutType_Nhwc = 1, OutType_TileC8 = 2 } OutType; + +typedef struct MatMulParameter { + // Primitive parameter + OpParameter op_parameter_; + bool has_bias_; + + // other parameter + int row_; + int col_; + int row_4_; + int row_6_; + int row_12_; + int row_16_; + int row_align_; + int col_4_; + int col_8_; + int col_align_; + int deep_; + int deep_4_; + int deep_16_; + int batch; + bool a_transpose_; /* false : row-major */ + bool b_transpose_; /* true : col-major */ + bool a_const_; + bool b_const_; + ActType act_type_; + bool use_axis_; + int axis_; +} MatMulParameter; + +typedef struct MatmulQuantParameter { + QuantArg input_; + QuantArg weight_; + QuantArg output_; + int32_t out_act_min_; + int32_t out_act_max_; + float *filter_scale_; + int32_t *filter_zp_; + int32_t *left_shift_; + int32_t *right_shift_; + int32_t *quant_multiplier_; +} MatmulQuantParameter; + +#endif // MINDSPORE_NNACL_MATMUL_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/mul_parameter.h 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/mul_parameter.h new file mode 100644 index 0000000000..d36daf32a2 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/mul_parameter.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_MUL_PARAMETER_H_ +#define MINDSPORE_NNACL_MUL_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct MulQuantArg { + QuantArg in_quant_args_[2]; + QuantArg out_quant_arg_; + int output_multiplier_; + int output_activation_min_; + int output_activation_max_; + int shift_left_; + int shift_right_; +} MulQuantArg; + +typedef struct MulParameter { + // Primitive parameter + OpParameter op_parameter_; + // other parameter + int thread_count_; + MulQuantArg mul_quant_arg_; +} MulParameter; + +#endif // MINDSPORE_NNACL_MUL_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/nnacl_common.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_common.c similarity index 100% rename from mindspore/lite/nnacl/nnacl_common.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_common.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_common.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_common.h new file mode 100644 index 0000000000..eee9a5e984 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_common.h @@ -0,0 +1,60 @@ +/** + * Copyright 2020 
Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_NNACL_COMMON_H_ +#define MINDSPORE_NNACL_NNACL_COMMON_H_ + +#include "nnacl/op_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void ComputeStrides(const int *shape, int *strides, const int ndim) { + int stride = 1; + for (int i = ndim - 1; i >= 0; i--) { + strides[i] = stride; + stride *= shape[i]; + } +} + +static inline void ComputeAxisDims(const int *shape, int shape_size, int axis, int *out_count, int *axis_count, + int *in_count) { + *out_count = 1; + *in_count = 1; + for (int i = 0; i < shape_size; i++) { + if (i < axis) *out_count = (*out_count) * shape[i]; + if (i == axis) *axis_count = shape[axis]; + if (i > axis) *in_count = (*in_count) * shape[i]; + } +} + +static const unsigned int FP32_BIT_SIZE = 32; +static const unsigned int FP32_EXPONENT_BIAS = 127; +static const unsigned int FP32_SIGNIFICAND = 23; +static const unsigned int FP32_EXPONENT_MAX = 255; +static const unsigned int FP16_BIT_SIZE = 16; +static const unsigned int FP16_EXPONENT_BIAS = 15; +static const unsigned int FP16_SIGNIFICAND = 10; +static const int FP16_EXPONENT_MAX = 30; +static const int FP16_EXPONENT_MIN = -10; +float ShortToFloat32(uint16_t src_value); +uint16_t Float32ToShort(float src_value); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_NNACL_COMMON_H_ diff --git a/mindspore/lite/nnacl/nnacl_utils.c 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_utils.c similarity index 100% rename from mindspore/lite/nnacl/nnacl_utils.c rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_utils.c diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_utils.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_utils.h new file mode 100644 index 0000000000..903754c02c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_utils.h @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_NNACL_UTILS_H_ +#define MINDSPORE_NNACL_NNACL_UTILS_H_ + +#include +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__arm__) || defined(__aarch64__) +uint32_t getHwCap(int hwcap_type); +#endif + +#ifdef DEBUG +#include +#define NNACL_ASSERT(f) assert(f) +#else +#define NNACL_ASSERT(f) ((void)0) +#endif + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_NNACL_NNACL_UTILS_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/non_max_suppression_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/non_max_suppression_parameter.h new file mode 100644 index 0000000000..d40cd71d00 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/non_max_suppression_parameter.h @@ -0,0 +1,28 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_NON_MAX_SUPPRESSION_PARAMETER_H_ +#define MINDSPORE_NNACL_NON_MAX_SUPPRESSION_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct NMSParameter { + // Primitive parameter + OpParameter op_parameter_; + int center_point_box_; +} NMSParameter; + +#endif // MINDSPORE_NNACL_NON_MAX_SUPPRESSION_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/op_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/op_base.h new file mode 100644 index 0000000000..c75f9be843 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/op_base.h @@ -0,0 +1,107 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_OP_BASE_H_ +#define MINDSPORE_NNACL_OP_BASE_H_ + +#include +#include +#include +#include +#if defined(ENABLE_AVX) || defined(ENABLE_SSE) || defined(ENABLE_ARM) +#include "nnacl/intrinsics/ms_simd_instructions.h" +#endif + +#define C2NUM 2 +#define C4NUM 4 +#define C6NUM 6 +#define C8NUM 8 +#define C12NUM 12 +#define C16NUM 16 +#define TILE_NUM 8 + +#define MSMIN(x, y) ((x) < (y) ? (x) : (y)) +#define MSMAX(x, y) ((x) > (y) ? (x) : (y)) + +#define UP_DIV(x, y) (((x) + (y) - (1)) / (y)) +#define UP_ROUND(x, y) (((x) + (y) - (1)) / (y) * (y)) +#define UP_ROUND_DIV(x, y) (x % y == 0 ? 
(x / y) : (x / y) + 1) +#define DOWN_DIV(x, y) (((x) - (y) + (1)) / (y)) + +#define MSVALID(left, x, right) (MSMIN((MSMAX(left, x)), right)) + +#define COMM_SHAPE_SIZE 4 +#define MAX_SHAPE_SIZE 8 + +#define DIMENSION_4D 4 +#define DIMENSION_6D 6 +#define DIMENSION_7D 7 +#define kInputIndex 0 +#define kWeightIndex 1 +#define kBiasIndex 2 +#define kOutputIndex 0 +#define kNHWC_N 0 +#define kNHWC_H 1 +#define kNHWC_W 2 +#define kNHWC_C 3 +#define kInputSize1 2 +#define kInputSize2 3 +#define MAX_AXIS_SIZE 6 +#define MAX_LEN 256 +#define FLT16_MAX 65504 + +typedef enum LiteDataType { + kDataTypeFloat, + kDataTypeFloat16, + kDataTypeInt, + kDataTypeInt8, + KDataTypeBool, +} LiteDataType; + +typedef enum DataOrder { + RowMajor, + ColMajor, +} DataOrder; + +typedef struct OpParameter { + char name_[100]; + bool infer_flag_; + int type_; + int thread_num_; + int quant_type_; +} OpParameter; + +typedef struct QuantArg { + float scale_; + int32_t zp_; +} QuantArg; + +typedef struct QuantMulArg { + int32_t multiplier_; + int left_shift_; + int right_shift_; +} QuantMulArg; + +typedef enum ActType { ActType_No, ActType_Relu, ActType_Sigmod, ActType_Relu6, ActType_Prelu } ActType; +typedef enum PadMode { Pad_pad, Pad_same, Pad_valid } PadMode; +typedef enum RoundingMode { Rounding_No, Rounding_Away_from_zero, Rounding_Up } RoundingMode; +typedef enum CalFixedMultiplierMode { + Method_No, + Method_SinglePrecision, + Method_DoublePrecision +} CalFixedMultiplierMode; + +#endif // MINDSPORE_NNACL_OP_BASE_H_ diff --git a/mindspore/lite/nnacl/optimize/CMakeLists.txt b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/optimize/CMakeLists.txt similarity index 100% rename from mindspore/lite/nnacl/optimize/CMakeLists.txt rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/optimize/CMakeLists.txt diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pack.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pack.h new file mode 100644 index 0000000000..7c3a4b3acc --- 
/dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pack.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_NNACL_PACK_H_ +#define MINDSPORE_NNACL_PACK_H_ + +#include "nnacl/fp32/pack_fp32.h" +#include "nnacl/int8/pack_int8.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_NNACL_PACK_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pad_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pad_parameter.h new file mode 100644 index 0000000000..a6f2a1b5e4 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pad_parameter.h @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_PAD_PARAMETER_H_ +#define MINDSPORE_NNACL_PAD_PARAMETER_H_ + +#include "nnacl/op_base.h" + +#define MAX_PAD_SIZE 8 +#define DEFAULT_PAD_NDIMS 4 + +typedef struct PadQuantArg { + QuantArg *in_quant_args_; + QuantArg *out_quanr_args_; + int8_t *constant_value_; +} PadQuantArg; + +typedef struct PadParameter { + // Primitive parameter + OpParameter op_parameter_; + int paddings_[MAX_SHAPE_SIZE]; + int pad_mode_; + float constant_value_; + // shape correlative + int padding_length; + // other parameter + int in_strides[COMM_SHAPE_SIZE]; + int out_strides[DEFAULT_PAD_NDIMS]; + int mirror_offset_; + PadQuantArg pad_quant_arg_; +} PadParameter; + +typedef struct MirrorPadBlock { + int out_offset_; + int out_stride_[DEFAULT_PAD_NDIMS]; + int size_[DEFAULT_PAD_NDIMS]; +} MirrorPadBlock; + +#endif // MINDSPORE_NNACL_PAD_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pooling_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pooling_parameter.h new file mode 100644 index 0000000000..76e8914723 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pooling_parameter.h @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_POOLING_PARAMETER_H_ +#define MINDSPORE_NNACL_POOLING_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef enum PoolMode { PoolMode_No, PoolMode_MaxPool, PoolMode_AvgPool } PoolMode; + +typedef enum RoundMode { RoundMode_No, RoundMode_Ceil, RoundMode_Floor } RoundMode; + +typedef struct PoolingParameter { + // Primitive parameter + OpParameter op_parameter_; + PoolMode pool_mode_; + RoundMode round_mode_; + PadMode pad_mode_; + ActType act_type_; + int avg_mode_; + bool global_; + int window_w_; + int window_h_; + int stride_w_; + int stride_h_; + // shape correlative + int input_w_; + int input_h_; + int input_batch_; + int input_channel_; + int output_w_; + int output_h_; + int output_batch_; + int output_channel_; + int pad_u_; + int pad_d_; + int pad_l_; + int pad_r_; + // other parameter + int thread_num_; + QuantArg **quant_args_; + bool quantize_; +} PoolingParameter; + +#endif // MINDSPORE_NNACL_POOLING_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/power_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/power_parameter.h new file mode 100644 index 0000000000..25c7745650 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/power_parameter.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_POWER_PARAMETER_H_ +#define MINDSPORE_NNACL_POWER_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct PowerQuantArg { + QuantArg in_args_; + QuantArg exp_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; +} PowerQuantArg; + +typedef struct PowerParameter { + // Primitive parameter + OpParameter op_parameter_; + float power_; + float scale_; + float shift_; + // other parameter + PowerQuantArg quant_arg_; + bool broadcast_; +} PowerParameter; + +#endif // MINDSPORE_NNACL_POWER_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/predict_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/predict_parameter.h new file mode 100644 index 0000000000..b2901ff699 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/predict_parameter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_PREDICT_PARAMETER_H_ +#define MINDSPORE_NNACL_PREDICT_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct { + // Primitive parameter + OpParameter op_parameter_; + // other parameter + int output_num; + float weight_threshold; +} PredictParameter; + +typedef struct { + int label; + float weight; +} LabelInfo; +#endif // MINDSPORE_NNACL_PREDICT_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/prelu_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/prelu_parameter.h new file mode 100644 index 0000000000..9fdaa1d59d --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/prelu_parameter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_PRELU_PARAMETER_H_ +#define MINDSPORE_NNACL_PRELU_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct PReluParameter { + // Primitive parameter + OpParameter op_parameter_; + // other parameter + float *slope_; + bool channelShared; + int tile_block_; + int channel_num_; + int input_num_; +} PReluParameter; + +#endif // MINDSPORE_NNACL_PRELU_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/prior_box_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/prior_box_parameter.h new file mode 100644 index 0000000000..699e2a2028 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/prior_box_parameter.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_PRIOR_BOX_PARAMETER_H_ +#define MINDSPORE_NNACL_PRIOR_BOX_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct PriorBoxParameter { + // Primitive parameter + OpParameter op_parameter_; + int32_t min_sizes_size; + int32_t min_sizes[MAX_SHAPE_SIZE]; + int32_t max_sizes_size; + int32_t max_sizes[MAX_SHAPE_SIZE]; + int32_t aspect_ratios_size; + float aspect_ratios[MAX_SHAPE_SIZE]; + float variances[COMM_SHAPE_SIZE]; + int32_t image_size_w; + int32_t image_size_h; + float step_w; + float step_h; + bool clip; + bool flip; + float offset; +} PriorBoxParameter; + +#endif // MINDSPORE_NNACL_PRIOR_BOX_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/random_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/random_parameter.h new file mode 100644 index 0000000000..9907617c24 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/random_parameter.h @@ -0,0 +1,27 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_RNADOM_PARAMETER_H_ +#define MINDSPORE_NNACL_RNADOM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct RandomParam { + OpParameter op_parameter_; + int seed_; + int seed2_; +} RandomParam; + +#endif // MINDSPORE_NNACL_RNADOM_STANDARD_NORMAL_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reduce_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reduce_parameter.h new file mode 100644 index 0000000000..e0752d0c29 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reduce_parameter.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_REDUCE_PARAMETER_H_ +#define MINDSPORE_NNACL_REDUCE_PARAMETER_H_ +#include "nnacl/op_base.h" + +typedef struct ReduceParameter { + // primitive parameter + OpParameter op_parameter_; + int axes_[MAX_SHAPE_SIZE]; + bool keep_dims_; + int mode_; + bool reduce_to_end_; + float coeff; + + // other parameter + int num_axes_; +} ReduceParameter; + +#endif // MINDSPORE_NNACL_REDUCE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reshape_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reshape_parameter.h new file mode 100644 index 0000000000..c23fbbff66 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reshape_parameter.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_RESHAPE_PARAMETER_H_ +#define MINDSPORE_NNACL_RESHAPE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct ReshapeQuantArg { + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; +} ReshapeQuantArg; + +typedef struct ReshapeParameter { + // primitive parameter + OpParameter op_parameter_; + int shape_dim_; + int shape_[MAX_SHAPE_SIZE]; + + // other parameter + ReshapeQuantArg quant_para_; + int thread_count_; +} ReshapeParameter; + +#endif // MINDSPORE_NNACL_RESHAPE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/resize_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/resize_parameter.h new file mode 100644 index 0000000000..507945f6ea --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/resize_parameter.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_RESIZE_PARAMETER_H_ +#define MINDSPORE_NNACL_RESIZE_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct ResizeParameter { + // primitive parameter + OpParameter op_parameter_; + int method_; + int64_t new_height_; + int64_t new_width_; + int coordinate_transform_mode_; + float cubic_coeff_; + bool preserve_aspect_ratio_; +} ResizeParameter; + +typedef struct CropAndResizeParameter { + // primitive parameter + OpParameter op_parameter_; + int method_; + float extrapolation_value_; +} CropAndResizeParameter; +#endif // MINDSPORE_NNACL_RESIZE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reverse_sequence_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reverse_sequence_parameter.h new file mode 100644 index 0000000000..da0305dd0c --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/reverse_sequence_parameter.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_REVERSE_SEQUENCE_PARAMETER_H_ +#define MINDSPORE_NNACL_REVERSE_SEQUENCE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct ReverseSequenceParameter { + // primitive parameter + OpParameter op_parameter_; + int seq_axis_; + int batch_axis_; + + // shape correlative + int input_shape0_[5]; + int output_shape_[5]; + int input_stride_[5]; + int output_stride_[5]; + + // other parameter + int ndim_; + int outer_count_; + int outer_stride_; + int inner_count_; + int inner_stride_; + int copy_byte_size_; + int total_data_size_; + bool is_seq_length_int32_; +} ReverseSequenceParameter; + +#endif // MINDSPORE_NNACL_REVERSE_SEQUENCE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/scale.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/scale.h new file mode 100644 index 0000000000..9faf063be8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/scale.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SCALE_H_ +#define MINDSPORE_NNACL_SCALE_H_ + +#include "nnacl/op_base.h" + +typedef struct ScaleParameter { + // primitive parameter + OpParameter op_parameter_; + int axis_; + int activation_type_; + + // shape correlative + int outer_size_; + int axis_size_; + int inner_size_; + + // other parameter + bool const_scale_; + bool const_offset_; + QuantMulArg scale_mul_arg_; + QuantMulArg offset_mul_arg_; + int input_zp_; + int scale_zp_; + int offset_zp_; + int output_zp_; + int output_activation_min_; + int output_activation_max_; +} ScaleParameter; + +#endif // MINDSPORE_NNACL_SCALE_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/sigmoid_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/sigmoid_parameter.h new file mode 100644 index 0000000000..b17b1026e8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/sigmoid_parameter.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SIGMOID_PARAMETER_H_ +#define MINDSPORE_NNACL_SIGMOID_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SigmoidParameter { + // primitive parameter + OpParameter op_parameter_; + + // shape correlative + const int *in_shape_; + const int *out_shape_; + + // other parameter + SigmoidQuantArg quant_arg; + double alpha_; + int thread_count_; + int64_t offset_[MAX_SHAPE_SIZE]; + int64_t in_offset_[MAX_SHAPE_SIZE]; + int64_t axis_; + int input_dim_; + int element_num; +} SigmoidParameter; + +#endif // MINDSPORE_NNACL_SIGMOID_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/skip_gram_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/skip_gram_parameter.h new file mode 100644 index 0000000000..fa4f989294 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/skip_gram_parameter.h @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SKIP_GRAM_PARAMETER_H_ +#define MINDSPORE_NNACL_SKIP_GRAM_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SkipGramParameter { + // primitive parameter + OpParameter op_parameter_; + bool include_all_ngrams; + int max_skip_size; + int ngram_size; +} SkipGramParameter; + +#endif // MINDSPORE_NNACL_SKIP_GRAM_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/slice_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/slice_parameter.h new file mode 100644 index 0000000000..0a99214bdd --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/slice_parameter.h @@ -0,0 +1,45 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SLICE_PARAMETER_H_ +#define MINDSPORE_NNACL_SLICE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SliceQuantArg { + QuantArg in_args_; + QuantArg out_args_; + int output_activation_min_; + int output_activation_max_; +} SliceQuantArg; + +typedef struct SliceParameter { + // primitive parameter + OpParameter op_parameter_; + + // shape correlative + int32_t shape_[COMM_SHAPE_SIZE]; + int32_t begin_[COMM_SHAPE_SIZE]; + int32_t end_[COMM_SHAPE_SIZE]; + int32_t size_[COMM_SHAPE_SIZE]; + int32_t axis_[COMM_SHAPE_SIZE]; + + // other parameter + SliceQuantArg quant_arg_; + int32_t param_length_; +} SliceParameter; + +#endif // MINDSPORE_NNACL_SLICE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/softmax_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/softmax_parameter.h new file mode 100644 index 0000000000..04b978fd68 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/softmax_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SOFTMAX_PARAMETER_H_ +#define MINDSPORE_NNACL_SOFTMAX_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SoftmaxParameter { + // primitive parameter + OpParameter op_parameter_; + int32_t axis_; + + // shape correlative + int input_shape_[5]; + + // other parameter + int element_size_; + int n_dim_; +} SoftmaxParameter; + +#endif // MINDSPORE_NNACL_SOFTMAX_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/space_to_depth_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/space_to_depth_parameter.h similarity index 100% rename from mindspore/lite/nnacl/space_to_depth_parameter.h rename to mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/space_to_depth_parameter.h diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/sparse_to_dense_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/sparse_to_dense_parameter.h new file mode 100644 index 0000000000..783eec124a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/sparse_to_dense_parameter.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SPARSE_TO_DENSE_PARAMETER_H_ +#define MINDSPORE_NNACL_SPARSE_TO_DENSE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct SparseToDenseParameter { + // primitive parameter + OpParameter op_parameter_; + bool validate_indices_; + + // other parameter + int thread_num_; +} SparseToDenseParameter; + +#endif // MINDSPORE_NNACL_SPARSE_TO_DENSE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/splice_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/splice_parameter.h new file mode 100644 index 0000000000..b20460860f --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/splice_parameter.h @@ -0,0 +1,29 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SPLICE_PARAMETER_H_ +#define MINDSPORE_NNACL_SPLICE_PARAMETER_H_ +#include "nnacl/op_base.h" +typedef struct SpliceParameter { + OpParameter op_parameter_; + int context_dim_; + int forward_indexes_dim_; + int src_to_dst_row_offset_; + int *context_; + int *forward_indexes_; + int output_dim_; +} SpliceParameter; +#endif // MINDSPORE_NNACL_SPLICE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/split_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/split_parameter.h new file mode 100644 index 0000000000..5346bb87f9 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/split_parameter.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SPLIT_PARAMETER_H_ +#define MINDSPORE_NNACL_SPLIT_PARAMETER_H_ + +#include "nnacl/op_base.h" + +#define SPLIT_STRIDES_SIZE 32 + +typedef struct SplitQuantArg { + QuantArg in_args_; + QuantArg out_args_[20]; + int output_activation_min_; + int output_activation_max_; +} SplitQuantArg; + +typedef struct SplitParameter { + // primitive parameter + OpParameter op_parameter_; + int num_split_; + int *split_sizes_; + int split_dim_; + + // shape correlative + int strides_[SPLIT_STRIDES_SIZE]; + + // other parameter + SplitQuantArg quant_arg_; + int n_dims_; + int split_count_; +} SplitParameter; + +#endif // MINDSPORE_NNACL_SPLIT_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/squeeze_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/squeeze_parameter.h new file mode 100644 index 0000000000..a5b47bf0c7 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/squeeze_parameter.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_SQUEEZE_PARAMETER_H_ +#define MINDSPORE_NNACL_SQUEEZE_PARAMETER_H_ +#include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#define SQUEEZE_OFFSET_MAX_SIZE 4 + +typedef struct SqueezeQuantArg { + QuantArg *in_quant_args_; + QuantArg *out_quant_args_; +} SqueezeQuantArg; + +typedef struct SqueezeParameter { + // primitive parameter + OpParameter op_parameter_; + int axis_[8]; + size_t axis_size_; + + // shape correlative + const int *in_shape_; + const int *out_shape_; + int offset_size_; + int64_t offset_[SQUEEZE_OFFSET_MAX_SIZE]; + int64_t in_offset_[SQUEEZE_OFFSET_MAX_SIZE]; + int input_dim_; + // other parameter + SqueezeQuantArg quant_arg; +} SqueezeParameter; + +#endif // MINDSPORE_NNACL_SQUEEZE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/stack_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/stack_parameter.h new file mode 100644 index 0000000000..d713e74bd5 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/stack_parameter.h @@ -0,0 +1,27 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_STACK_PARAMETER_H_ +#define MINDSPORE_NNACL_STACK_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct StackParameter { + // primitive parameter + OpParameter op_parameter_; + int32_t axis_; +} StackParameter; + +#endif // MINDSPORE_NNACL_STACK_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/strided_slice_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/strided_slice_parameter.h new file mode 100644 index 0000000000..173e846f6a --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/strided_slice_parameter.h @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_STRIDED_SLICE_PARAMETER_H_ +#define MINDSPORE_NNACL_STRIDED_SLICE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct StridedSliceParameter { + // primitive parameter + OpParameter op_parameter_; + int begins_[MAX_SHAPE_SIZE]; + int ends_[MAX_SHAPE_SIZE]; + int strides_[MAX_SHAPE_SIZE]; + int isScale; + + // shape correlative + int in_shape_length_; + int in_shape_[MAX_SHAPE_SIZE]; + + // other parameter + int num_axes_; + LiteDataType data_type; + int begins_mask_; + int ends_mask_; + int ellipsisMask_; + int newAxisMask_; + int shrinkAxisMask_; +} StridedSliceParameter; + +#endif // MINDSPORE_NNACL_STRIDED_SLICE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/tensor_c.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/tensor_c.h new file mode 100644 index 0000000000..42b419c9c8 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/tensor_c.h @@ -0,0 +1,29 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_TENSOR_C_H_ +#define MINDSPORE_NNACL_TENSOR_C_H_ +#include "nnacl/op_base.h" + +typedef struct TensorC { + bool is_ready_; + int data_type_; + int format_; + void *data_; + size_t shape_size_; + int shape_[MAX_SHAPE_SIZE]; +} TensorC; + +#endif // MINDSPORE_NNACL_TENSOR_C_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/tensorlist_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/tensorlist_parameter.h new file mode 100644 index 0000000000..27e5c6b619 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/tensorlist_parameter.h @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_TENSORLIST_PARAMETER_H_ +#define MINDSPORE_NNACL_TENSORLIST_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct TensorListParameter { + // primitive parameter + OpParameter op_parameter_; + int shape_type_; + int element_dtype_; + + // other parameter + int num_element_; +} TensorListParameter; + +#endif // MINDSPORE_NNACL_TENSORLIST_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/transpose.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/transpose.h new file mode 100644 index 0000000000..6db866f982 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/transpose.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_TRANSPOSE_H_ +#define MINDSPORE_NNACL_TRANSPOSE_H_ + +#include "nnacl/op_base.h" + +#define MAX_TRANSPOSE_DIM_SIZE 6 + +typedef struct TransposeParameter { + // primitive parameter + OpParameter op_parameter_; + int perm_[MAX_SHAPE_SIZE]; + size_t perm_size_; + bool conjugate_; + + // shape correlative + int strides_[MAX_SHAPE_SIZE]; + int out_strides_[MAX_SHAPE_SIZE]; + + // other parameter + int num_axes_; + int data_size_; +} TransposeParameter; + +#endif // MINDSPORE_NNACL_TRANSPOSE_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/unsqueeze_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/unsqueeze_parameter.h new file mode 100644 index 0000000000..a598ac395b --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/unsqueeze_parameter.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_UNSQUEEZE_PARAMETER_H_ +#define MINDSPORE_NNACL_UNSQUEEZE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct UnSqueezeQuantArg { + int *output_shape_; + float alpha; + int axis_; + size_t input_num_; + QuantArg in_quant_args_; + QuantArg out_quant_args_; +} UnSqueezeQuantArg; + +typedef struct UnSqueezeParameter { + // primitive parameter + OpParameter op_parameter_; + int dims_[COMM_SHAPE_SIZE]; + int num_dim_; + + // shape correlative + const int *in_shape_; + const int *out_shape_; + int64_t offset_[COMM_SHAPE_SIZE]; + int64_t axis_; + + // other parameter + UnSqueezeQuantArg quant_arg; + int thread_count_; +} UnSqueezeParameter; + +#endif // MINDSPORE_NNACL_UNSQUEEZE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/unstack_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/unstack_parameter.h new file mode 100644 index 0000000000..fa52f4a5bd --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/unstack_parameter.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_UNSTACK_PARAMETER_H_ +#define MINDSPORE_NNACL_UNSTACK_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct UnstackParameter { + // primitive parameter + OpParameter op_parameter_; + int num_; + int axis_; + + // other parameter + int pre_dims_; + int axis_dim_; + int after_dims_; +} UnstackParameter; + +#endif // MINDSPORE_NNACL_UNSTACK_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/upsample_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/upsample_parameter.h new file mode 100644 index 0000000000..5f328fbe78 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/upsample_parameter.h @@ -0,0 +1,29 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_NNACL_UPSAMPLE_PARAMETER_H_ +#define MINDSPORE_NNACL_UPSAMPLE_PARAMETER_H_ + +#include "nnacl/op_base.h" +typedef struct { + // primitive parameter + OpParameter op_parameter_; + + // other parameter + int method_; // 0 for bilinear; 1 for nearest +} UpsampleParameter; + +#endif // MINDSPORE_NNACL_UPSAMPLE_PARAMETER_H_ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/where_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/where_parameter.h new file mode 100644 index 0000000000..73481b7b49 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/where_parameter.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_WHERE_PARAMETER_H_ +#define MINDSPORE_NNACL_WHERE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct WhereParameter { + // primitive parameter + OpParameter op_parameter_; + + // other parameter + int condition_num_; + int x_num_; + int y_num_; + int max_num_; + + int rank_; + int thread_num_; +} WhereParameter; + +#endif // MINDSPORE_NNACL_WHERE_PARAMETER_H_ diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt index 07be73fc50..37f1affff2 100644 --- a/mindspore/lite/CMakeLists.txt +++ b/mindspore/lite/CMakeLists.txt @@ -276,7 +276,7 @@ if(BUILD_MINDDATA STREQUAL "lite_cv") endif() add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src) -add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/nnacl) +add_subdirectory(${CCSRC_DIR}/backend/kernel_compiler/cpu/nnacl build) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/micro/coder) if(NOT APPLE AND ENABLE_TOOLS) if(SUPPORT_TRAIN) diff --git a/mindspore/lite/micro/cmake/file_list.cmake b/mindspore/lite/micro/cmake/file_list.cmake index 1c6c316d94..8777af0662 100644 --- a/mindspore/lite/micro/cmake/file_list.cmake +++ b/mindspore/lite/micro/cmake/file_list.cmake @@ -176,115 +176,117 @@ set(LITE_SRC ### tools ${LITE_DIR}/tools/common/flag_parser.cc ) + +set(NNACL_DIR ${TOP_DIR}/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl) set(LITE_KERNEL_SRC ### nnacl - ${LITE_DIR}/nnacl/common_func.c - ${LITE_DIR}/nnacl/base/minimal_filtering_generator.c - ${LITE_DIR}/nnacl/base/arithmetic_base.c - ${LITE_DIR}/nnacl/base/slice_base.c - ${LITE_DIR}/nnacl/fp32/winograd_utils.c - ${LITE_DIR}/nnacl/fp32/pack_fp32.c - ${LITE_DIR}/nnacl/int8/quantize.c - ${LITE_DIR}/nnacl/int8/pack_int8.c - ${LITE_DIR}/nnacl/int8/matmul_int8.c - ${LITE_DIR}/nnacl/int8/fixed_point.c - ${LITE_DIR}/nnacl/fp32/matmul_fp32.c - ${LITE_DIR}/nnacl/int8/arithmetic_int8.c - ${LITE_DIR}/nnacl/int8/add_int8.c - ${LITE_DIR}/nnacl/int8/concat_int8.c - ${LITE_DIR}/nnacl/int8/conv_int8.c - ${LITE_DIR}/nnacl/int8/conv3x3_int8.c 
- ${LITE_DIR}/nnacl/int8/conv1x1_int8.c - ${LITE_DIR}/nnacl/base/conv1x1_base.c - ${LITE_DIR}/nnacl/int8/conv_depthwise_int8.c - ${LITE_DIR}/nnacl/int8/deconv_int8.c - ${LITE_DIR}/nnacl/int8/common_func_int8.c - ${LITE_DIR}/nnacl/int8/slice_int8.c - ${LITE_DIR}/nnacl/int8/batchnorm_int8.c - ${LITE_DIR}/nnacl/int8/sub_int8.c - ${LITE_DIR}/nnacl/int8/quant_dtype_cast_int8.c - ${LITE_DIR}/nnacl/int8/sigmoid_int8.c - ${LITE_DIR}/nnacl/int8/resize_int8.c + ${NNACL_DIR}/common_func.c + ${NNACL_DIR}/base/minimal_filtering_generator.c + ${NNACL_DIR}/base/arithmetic_base.c + ${NNACL_DIR}/base/slice_base.c + ${NNACL_DIR}/fp32/winograd_utils.c + ${NNACL_DIR}/fp32/pack_fp32.c + ${NNACL_DIR}/int8/quantize.c + ${NNACL_DIR}/int8/pack_int8.c + ${NNACL_DIR}/int8/matmul_int8.c + ${NNACL_DIR}/int8/fixed_point.c + ${NNACL_DIR}/fp32/matmul_fp32.c + ${NNACL_DIR}/int8/arithmetic_int8.c + ${NNACL_DIR}/int8/add_int8.c + ${NNACL_DIR}/int8/concat_int8.c + ${NNACL_DIR}/int8/conv_int8.c + ${NNACL_DIR}/int8/conv3x3_int8.c + ${NNACL_DIR}/int8/conv1x1_int8.c + ${NNACL_DIR}/base/conv1x1_base.c + ${NNACL_DIR}/int8/conv_depthwise_int8.c + ${NNACL_DIR}/int8/deconv_int8.c + ${NNACL_DIR}/int8/common_func_int8.c + ${NNACL_DIR}/int8/slice_int8.c + ${NNACL_DIR}/int8/batchnorm_int8.c + ${NNACL_DIR}/int8/sub_int8.c + ${NNACL_DIR}/int8/quant_dtype_cast_int8.c + ${NNACL_DIR}/int8/sigmoid_int8.c + ${NNACL_DIR}/int8/resize_int8.c ### infer - ${LITE_DIR}/nnacl/infer/adam_infer.c - ${LITE_DIR}/nnacl/infer/add_sub_grad_infer.c - ${LITE_DIR}/nnacl/infer/addn_infer.c - ${LITE_DIR}/nnacl/infer/apply_momentum_infer.c - ${LITE_DIR}/nnacl/infer/argmin_max_infer.c - ${LITE_DIR}/nnacl/infer/arithmetic_compare_infer.c - ${LITE_DIR}/nnacl/infer/arithmetic_grad_infer.c - ${LITE_DIR}/nnacl/infer/arithmetic_infer.c - ${LITE_DIR}/nnacl/infer/assign_add_infer.c - ${LITE_DIR}/nnacl/infer/assign_infer.c - ${LITE_DIR}/nnacl/infer/batch_to_space_infer.c - ${LITE_DIR}/nnacl/infer/bias_grad_infer.c - 
${LITE_DIR}/nnacl/infer/binary_cross_entropy_infer.c - ${LITE_DIR}/nnacl/infer/bn_grad_infer.c - ${LITE_DIR}/nnacl/infer/broadcast_to_infer.c - ${LITE_DIR}/nnacl/infer/cast_infer.c - ${LITE_DIR}/nnacl/infer/common_infer.c - ${LITE_DIR}/nnacl/infer/concat_infer.c - ${LITE_DIR}/nnacl/infer/constant_of_shape_infer.c - ${LITE_DIR}/nnacl/infer/conv2d_grad_filter_infer.c - ${LITE_DIR}/nnacl/infer/conv2d_grad_input_infer.c - ${LITE_DIR}/nnacl/infer/conv2d_infer.c - ${LITE_DIR}/nnacl/infer/deconv2d_infer.c - ${LITE_DIR}/nnacl/infer/dedepthwise_conv2d_infer.c - ${LITE_DIR}/nnacl/infer/depthwise_conv2d_infer.c - ${LITE_DIR}/nnacl/infer/detection_post_process_infer.c - ${LITE_DIR}/nnacl/infer/expand_dims_infer.c - ${LITE_DIR}/nnacl/infer/fill_infer.c - ${LITE_DIR}/nnacl/infer/full_connection_infer.c - ${LITE_DIR}/nnacl/infer/fused_batchnorm_infer.c - ${LITE_DIR}/nnacl/infer/gather_infer.c - ${LITE_DIR}/nnacl/infer/gather_nd_infer.c - ${LITE_DIR}/nnacl/infer/group_conv2d_grad_input_infer.c - ${LITE_DIR}/nnacl/infer/infer_register.c - ${LITE_DIR}/nnacl/infer/lsh_projection_infer.c - ${LITE_DIR}/nnacl/infer/lstm_infer.c - ${LITE_DIR}/nnacl/infer/matmul_infer.c - ${LITE_DIR}/nnacl/infer/max_min_grad_infer.c - ${LITE_DIR}/nnacl/infer/mean_infer.c - ${LITE_DIR}/nnacl/infer/pooling_grad_infer.c - ${LITE_DIR}/nnacl/infer/pooling_infer.c - ${LITE_DIR}/nnacl/infer/power_infer.c - ${LITE_DIR}/nnacl/infer/quant_dtype_cast_infer.c - ${LITE_DIR}/nnacl/infer/range_infer.c - ${LITE_DIR}/nnacl/infer/rank_infer.c - ${LITE_DIR}/nnacl/infer/reduce_infer.c - ${LITE_DIR}/nnacl/infer/reshape_infer.c - ${LITE_DIR}/nnacl/infer/resize_infer.c - ${LITE_DIR}/nnacl/infer/roi_pooling_infer.c - ${LITE_DIR}/nnacl/infer/select_infer.c - ${LITE_DIR}/nnacl/infer/sgd_infer.c - ${LITE_DIR}/nnacl/infer/shape_infer.c - ${LITE_DIR}/nnacl/infer/slice_infer.c - ${LITE_DIR}/nnacl/infer/softmax_cross_entropy_infer.c - ${LITE_DIR}/nnacl/infer/softmax_infer.c - ${LITE_DIR}/nnacl/infer/space_to_batch_infer.c - 
${LITE_DIR}/nnacl/infer/space_to_batch_nd_infer.c - ${LITE_DIR}/nnacl/infer/space_to_depth_infer.c - ${LITE_DIR}/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.c - ${LITE_DIR}/nnacl/infer/sparse_to_dense_infer.c - ${LITE_DIR}/nnacl/infer/split_infer.c - ${LITE_DIR}/nnacl/infer/squeeze_infer.c - ${LITE_DIR}/nnacl/infer/strided_slice_grad_infer.c - ${LITE_DIR}/nnacl/infer/strided_slice_infer.c - ${LITE_DIR}/nnacl/infer/tile_infer.c - ${LITE_DIR}/nnacl/infer/topk_infer.c - ${LITE_DIR}/nnacl/infer/transpose_infer.c - ${LITE_DIR}/nnacl/infer/unsorted_segment_sum_infer.c - ${LITE_DIR}/nnacl/infer/unsqueeze_infer.c - ${LITE_DIR}/nnacl/infer/where_infer.c - ${LITE_DIR}/nnacl/infer/while_infer.c - ${LITE_DIR}/nnacl/infer/splice_infer.c + ${NNACL_DIR}/infer/adam_infer.c + ${NNACL_DIR}/infer/add_sub_grad_infer.c + ${NNACL_DIR}/infer/addn_infer.c + ${NNACL_DIR}/infer/apply_momentum_infer.c + ${NNACL_DIR}/infer/argmin_max_infer.c + ${NNACL_DIR}/infer/arithmetic_compare_infer.c + ${NNACL_DIR}/infer/arithmetic_grad_infer.c + ${NNACL_DIR}/infer/arithmetic_infer.c + ${NNACL_DIR}/infer/assign_add_infer.c + ${NNACL_DIR}/infer/assign_infer.c + ${NNACL_DIR}/infer/batch_to_space_infer.c + ${NNACL_DIR}/infer/bias_grad_infer.c + ${NNACL_DIR}/infer/binary_cross_entropy_infer.c + ${NNACL_DIR}/infer/bn_grad_infer.c + ${NNACL_DIR}/infer/broadcast_to_infer.c + ${NNACL_DIR}/infer/cast_infer.c + ${NNACL_DIR}/infer/common_infer.c + ${NNACL_DIR}/infer/concat_infer.c + ${NNACL_DIR}/infer/constant_of_shape_infer.c + ${NNACL_DIR}/infer/conv2d_grad_filter_infer.c + ${NNACL_DIR}/infer/conv2d_grad_input_infer.c + ${NNACL_DIR}/infer/conv2d_infer.c + ${NNACL_DIR}/infer/deconv2d_infer.c + ${NNACL_DIR}/infer/dedepthwise_conv2d_infer.c + ${NNACL_DIR}/infer/depthwise_conv2d_infer.c + ${NNACL_DIR}/infer/detection_post_process_infer.c + ${NNACL_DIR}/infer/expand_dims_infer.c + ${NNACL_DIR}/infer/fill_infer.c + ${NNACL_DIR}/infer/full_connection_infer.c + ${NNACL_DIR}/infer/fused_batchnorm_infer.c + 
${NNACL_DIR}/infer/gather_infer.c + ${NNACL_DIR}/infer/gather_nd_infer.c + ${NNACL_DIR}/infer/group_conv2d_grad_input_infer.c + ${NNACL_DIR}/infer/infer_register.c + ${NNACL_DIR}/infer/lsh_projection_infer.c + ${NNACL_DIR}/infer/lstm_infer.c + ${NNACL_DIR}/infer/matmul_infer.c + ${NNACL_DIR}/infer/max_min_grad_infer.c + ${NNACL_DIR}/infer/mean_infer.c + ${NNACL_DIR}/infer/pooling_grad_infer.c + ${NNACL_DIR}/infer/pooling_infer.c + ${NNACL_DIR}/infer/power_infer.c + ${NNACL_DIR}/infer/quant_dtype_cast_infer.c + ${NNACL_DIR}/infer/range_infer.c + ${NNACL_DIR}/infer/rank_infer.c + ${NNACL_DIR}/infer/reduce_infer.c + ${NNACL_DIR}/infer/reshape_infer.c + ${NNACL_DIR}/infer/resize_infer.c + ${NNACL_DIR}/infer/roi_pooling_infer.c + ${NNACL_DIR}/infer/select_infer.c + ${NNACL_DIR}/infer/sgd_infer.c + ${NNACL_DIR}/infer/shape_infer.c + ${NNACL_DIR}/infer/slice_infer.c + ${NNACL_DIR}/infer/softmax_cross_entropy_infer.c + ${NNACL_DIR}/infer/softmax_infer.c + ${NNACL_DIR}/infer/space_to_batch_infer.c + ${NNACL_DIR}/infer/space_to_batch_nd_infer.c + ${NNACL_DIR}/infer/space_to_depth_infer.c + ${NNACL_DIR}/infer/sparse_softmax_cross_entropy_with_logits_infer.c + ${NNACL_DIR}/infer/sparse_to_dense_infer.c + ${NNACL_DIR}/infer/split_infer.c + ${NNACL_DIR}/infer/squeeze_infer.c + ${NNACL_DIR}/infer/strided_slice_grad_infer.c + ${NNACL_DIR}/infer/strided_slice_infer.c + ${NNACL_DIR}/infer/tile_infer.c + ${NNACL_DIR}/infer/topk_infer.c + ${NNACL_DIR}/infer/transpose_infer.c + ${NNACL_DIR}/infer/unsorted_segment_sum_infer.c + ${NNACL_DIR}/infer/unsqueeze_infer.c + ${NNACL_DIR}/infer/where_infer.c + ${NNACL_DIR}/infer/while_infer.c + ${NNACL_DIR}/infer/splice_infer.c ) #### sse if("${X86_64_SIMD}" STREQUAL "sse") set(SSE_SRC - ${LITE_DIR}/nnacl/intrinsics/sse/sse_common.c - ${LITE_DIR}/nnacl/intrinsics/sse/MatMul_Sse.c + ${NNACL_DIR}/intrinsics/sse/sse_common.c + ${NNACL_DIR}/intrinsics/sse/MatMul_Sse.c ) set_property(SOURCE ${SSE_SRC} PROPERTY LANGUAGE C) endif() @@ -294,10 +296,10 @@ 
if("${X86_64_SIMD}" STREQUAL "avx") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1 -mavx -mavx2") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.1 -mavx -mavx2") set(AVX_SRC - ${LITE_DIR}/nnacl/intrinsics/avx/common_utils.c - ${LITE_DIR}/nnacl/intrinsics/sse/sse_common.c - ${LITE_DIR}/nnacl/intrinsics/sse/MatMul_Sse.c - ${LITE_DIR}/nnacl/assembly/avx/MatmulAvx.S + ${NNACL_DIR}/intrinsics/avx/common_utils.c + ${NNACL_DIR}/intrinsics/sse/sse_common.c + ${NNACL_DIR}/intrinsics/sse/MatMul_Sse.c + ${NNACL_DIR}/assembly/avx/MatmulAvx.S ) set_property(SOURCE ${AVX_SRC} PROPERTY LANGUAGE C) endif() diff --git a/mindspore/lite/micro/coder/CMakeLists.txt b/mindspore/lite/micro/coder/CMakeLists.txt index 3463649c78..4c53e33503 100644 --- a/mindspore/lite/micro/coder/CMakeLists.txt +++ b/mindspore/lite/micro/coder/CMakeLists.txt @@ -18,6 +18,7 @@ include_directories(${3RD_DIR}/flatbuffers/include) #include ms include_directories(${TOP_DIR}/) include_directories(${TOP_DIR}/mindspore/core/) +include_directories(${TOP_DIR}/mindspore/ccsrc/backend/kernel_compiler/cpu) include_directories(${LITE_DIR}) include_directories(${MICRO_DIR}) #include coder diff --git a/mindspore/lite/nnacl/adder.h b/mindspore/lite/nnacl/adder.h deleted file mode 100644 index 00c92796f3..0000000000 --- a/mindspore/lite/nnacl/adder.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ADDER_H_ -#define MINDSPORE_LITE_NNACL_ADDER_H_ - -#include "nnacl/op_base.h" - -typedef struct AdderParameter { - OpParameter op_parameter_; -} AdderParameter; - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ADDER_H_ diff --git a/mindspore/lite/nnacl/arg_min_max_parameter.h b/mindspore/lite/nnacl/arg_min_max_parameter.h deleted file mode 100644 index 9569d958ff..0000000000 --- a/mindspore/lite/nnacl/arg_min_max_parameter.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_ARG_MIN_MAX_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_ARG_MIN_MAX_PARAMETER_H_ - -#ifdef ENABLE_ARM64 -#include -#endif -#include "nnacl/op_base.h" - -typedef int (*COMPARE_FUNCTION)(const void *a, const void *b); - -typedef struct ArgElement { - uint32_t index_; - union ArgData { - int8_t i8_data_; - int32_t i_data_; - float f_data_; -#ifdef ENABLE_ARM64 - float16_t f16_data_; -#endif - } data_; -} ArgElement; - -typedef struct ArgMinMaxParameter { - OpParameter op_parameter_; - bool out_value_; - bool keep_dims_; - bool get_max_; - int32_t axis_; - int32_t topk_; - int32_t axis_type_; - int32_t dims_size_; - int32_t data_type_; // equals to type_id - int32_t in_strides_[COMM_SHAPE_SIZE]; - int32_t out_strides_[COMM_SHAPE_SIZE]; - ArgElement *arg_elements_; -} ArgMinMaxParameter; - -#endif // MINDSPORE_LITE_NNACL_ARG_MIN_MAX_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/arithmetic.h b/mindspore/lite/nnacl/arithmetic.h deleted file mode 100644 index 198a2b6fce..0000000000 --- a/mindspore/lite/nnacl/arithmetic.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_ARTITHMETIC_H_ -#define MINDSPORE_LITE_NNACL_ARTITHMETIC_H_ - -#include "nnacl/op_base.h" -#include "nnacl/common_func.h" -#include "nnacl/nnacl_utils.h" - -typedef struct ArithmeticParameter { - OpParameter op_parameter_; - bool broadcasting_; - size_t ndim_; - int activation_type_; - int in_shape0_[10]; - int in_elements_num0_; - int in_shape1_[10]; - int in_elements_num1_; - - int out_shape_[10]; - int out_elements_num_; - - int in_strides0_[10]; - int in_strides1_[10]; - int out_strides_[10]; - - int multiples0_[10]; - int multiples1_[10]; - int eltwise_mode_; // eltwise need -} ArithmeticParameter; - -#endif // MINDSPORE_LITE_NNACL_ARTITHMETIC_H_ diff --git a/mindspore/lite/nnacl/arithmetic_self_parameter.h b/mindspore/lite/nnacl/arithmetic_self_parameter.h deleted file mode 100644 index d98eb72613..0000000000 --- a/mindspore/lite/nnacl/arithmetic_self_parameter.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_PARAMETER_H_ - -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/int8/quantize.h" - -// For Abs, Cos, Exp, Log, Square, Sqrt, Rsqrt ops. 
-typedef struct ArithmeticSelfParameter { - OpParameter op_parameter_; - ArithSelfQuantArg quant_arg_; -} ArithmeticSelfParameter; - -#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/assembly_global.h b/mindspore/lite/nnacl/assembly_global.h deleted file mode 100644 index 8ef79d8d0d..0000000000 --- a/mindspore/lite/nnacl/assembly_global.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_ASSEMBLY_GLOBAL_H -#define MINDSPORE_LITE_NNACL_ASSEMBLY_GLOBAL_H - -// clang-format off -.macro asm_function fname -#ifdef __APPLE__ -.globl _\fname -_\fname: -#else -.global \fname -#ifdef __ELF__ -.hidden \fname -.type \fname, %function -#endif -\fname: -#endif -.endm - -// clang-format on - -#endif // MINDSPORE_LITE_NNACL_ASSEMBLY_GLOBAL_H diff --git a/mindspore/lite/nnacl/base/arithmetic_base.h b/mindspore/lite/nnacl/base/arithmetic_base.h deleted file mode 100644 index 3e77c944c6..0000000000 --- a/mindspore/lite/nnacl/base/arithmetic_base.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_BASE_ARITHMETIC_BASE_H_ -#define MINDSPORE_LITE_NNACL_BASE_ARITHMETIC_BASE_H_ - -#include "nnacl/arithmetic.h" -#include "nnacl/nnacl_utils.h" -#include "nnacl/nnacl_common.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void CalcMultiplesAndStrides(ArithmeticParameter *param); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_BASE_ARITHMETIC_BASE_H_ diff --git a/mindspore/lite/nnacl/base/batch_to_space_base.h b/mindspore/lite/nnacl/base/batch_to_space_base.h deleted file mode 100644 index a4fd9548a7..0000000000 --- a/mindspore/lite/nnacl/base/batch_to_space_base.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_BASE_H_ -#define MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_BASE_H_ - -#include -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -void BatchToSpaceNoCropForNHWC(const void *input, void *output, const int *in_shape, int out_n, const int *block, - int data_size); -void BatchToSpaceForNHWC(const void *input, void *output, const int *in_shape, int out_n, const int *block, - const int *crops, int data_size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_BASE_H_ diff --git a/mindspore/lite/nnacl/base/cast_base.h b/mindspore/lite/nnacl/base/cast_base.h deleted file mode 100644 index 757007b0b3..0000000000 --- a/mindspore/lite/nnacl/base/cast_base.h +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CAST_BASE_H_ -#define MINDSPORE_LITE_NNACL_CAST_BASE_H_ - -#include "nnacl/op_base.h" -#include "nnacl/nnacl_common.h" - -#ifdef __cplusplus -extern "C" { -#endif - -inline void BoolToFloat32(const bool *input, float *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (float)input[i]; - } -} - -inline void Uint8ToFloat32(const uint8_t *input, float *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (float)input[i]; - } -} - -inline void Int32ToFloat32(const int32_t *input, float *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (float)input[i]; - } -} - -inline void Int64ToFloat32(const int64_t *input, float *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (float)input[i]; - } -} - -#ifdef ENABLE_FP16 -inline void Int64ToFp16(const int64_t *input, float16_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (float16_t)input[i]; - } -} -#endif - -inline void Fp16ToFloat32(const uint16_t *input, float *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = ShortToFloat32(input[i]); - } -} - -inline void Float32ToFp16(const float *input, uint16_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = Float32ToShort(input[i]); - } -} - -inline void Float32ToInt32(const float *input, int32_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (int32_t)input[i]; - } -} - -inline void Float32ToInt64(const float *input, int64_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (int64_t)input[i]; - } -} - -inline void Int32ToInt64(const int32_t *input, int64_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (int64_t)input[i]; - } -} - -inline void Float32ToInt16(const float *input, int16_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (int16_t)input[i]; - } -} - -inline void 
BoolToInt32(const bool *input, int32_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (int32_t)input[i]; - } -} - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_CAST_BASE_H_ diff --git a/mindspore/lite/nnacl/base/concat_base.h b/mindspore/lite/nnacl/base/concat_base.h deleted file mode 100644 index ae6bc5da02..0000000000 --- a/mindspore/lite/nnacl/base/concat_base.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_CONCAT_BASE_H_ -#define MINDSPORE_LITE_NNACL_FP32_CONCAT_BASE_H_ - -#include -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Concat(void **input, int input_num, int axis, int **inputs_output_shape, size_t shape_size, void *output, - int task_id, int thread_num, int data_size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_CONCAT_BASE_H_ diff --git a/mindspore/lite/nnacl/base/conv1x1_base.h b/mindspore/lite/nnacl/base/conv1x1_base.h deleted file mode 100644 index fc2b63d7b0..0000000000 --- a/mindspore/lite/nnacl/base/conv1x1_base.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_BASE_CONV1X1_BASE_H_ -#define MINDSPORE_LITE_NNACL_BASE_CONV1X1_BASE_H_ - -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void Conv1x1InputPack(const void *src_ptr, void *dst_ptr, ConvParameter *conv_param, int data_size); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_BASE_CONV1X1_BASE_H_ diff --git a/mindspore/lite/nnacl/base/depth_to_space_base.h b/mindspore/lite/nnacl/base/depth_to_space_base.h deleted file mode 100644 index 23474a4f44..0000000000 --- a/mindspore/lite/nnacl/base/depth_to_space_base.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_H_ -#define MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_H_ - -#include -#include "nnacl/depth_to_space_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void DepthToSpaceForNHWC(const void *input, void *output, const int *in_shape, const DepthToSpaceParameter *param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_H_ diff --git a/mindspore/lite/nnacl/base/fill_base.h b/mindspore/lite/nnacl/base/fill_base.h deleted file mode 100644 index 79a100d83e..0000000000 --- a/mindspore/lite/nnacl/base/fill_base.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FILL_BASE_H_ -#define MINDSPORE_LITE_NNACL_FILL_BASE_H_ - -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/fill_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int FillFp32(float *output, int size, float data); -int FillInt32(int *output, int size, int data); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FILL_BASE_H_ diff --git a/mindspore/lite/nnacl/base/gather_base.h b/mindspore/lite/nnacl/base/gather_base.h deleted file mode 100644 index 1c3eb0c9b1..0000000000 --- a/mindspore/lite/nnacl/base/gather_base.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_GATHER_BASE_H_ -#define MINDSPORE_LITE_NNACL_GATHER_BASE_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif -int Gather(const void *input, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, - void *output, int data_size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_GATHER_BASE_H_ diff --git a/mindspore/lite/nnacl/base/minimal_filtering_generator.h b/mindspore/lite/nnacl/base/minimal_filtering_generator.h deleted file mode 100644 index 46f4357b11..0000000000 --- a/mindspore/lite/nnacl/base/minimal_filtering_generator.h +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_MINIMAL_FILTERING_GENERATOR_H_ -#define MINDSPORE_LITE_NNACL_MINIMAL_FILTERING_GENERATOR_H_ - -#ifdef ENABLE_ARM -#include -#endif -#include -#include "nnacl/pack.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Polynomial(const float *interval, float *m, int degree); - -void DiagonalPlusMatrix(const float *matrix, float *diagonal_matrix, int degree); - -void ResidueMatrix(const float *interval, float *b, int row, int col); - -int LT(const float *poly_array, float *matrix_lt, int n); - -void T(const float *poly_array, float *matrix_t, int n); - -int B(const float *poly_array, float *matrix_b, int in_unit); - -void GenerateIntervalArray(float *array, float interval, int degree); - -void MatrixTranspose(const float *matrix, float *trans_matrix, int row, int col); - -void MatrixMultiply(const float *matrix_a, const float *matrix_b, float *matrix_c, int m, int k, int n); - -int CookToomFilter(float *matrix_a, float *matrix_at, float *matrix_b, float *matrix_bt, float *matrix_g, - float *matrix_gt, float coefficient, int out_unit, int filter_size); -void MatrixMultiplyWinograd(const float *matix_a, const float *matrix_b, float *matrix_c, int m, int k, int n, - int in_channel, int c4_channel); - -int WinogradWeightTransform(const float *weight_data, float *winograd_data, float *matrix_g, const float *matrix_gt, - int oc_block, int input_unit_, int kernel_unit_, int channel, int batch, bool pack); - -#if defined(ENABLE_ARM) || defined(ENABLE_SSE) -void MatrixMultiplyVec(const MS_FLOAT32X4 *matrix_a, const MS_FLOAT32X4 *matrix_b, MS_FLOAT32X4 *matrix_c, - const float *bias, int m, int k, int n); -#endif -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_MINIMAL_FILTERING_GENERATOR_H_ diff --git a/mindspore/lite/nnacl/base/slice_base.h b/mindspore/lite/nnacl/base/slice_base.h deleted file mode 100644 index 35ad748eca..0000000000 --- a/mindspore/lite/nnacl/base/slice_base.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * 
Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_BASE_SLICE_BASE_H_ -#define MINDSPORE_LITE_NNACL_BASE_SLICE_BASE_H_ - -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/slice_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void PadSliceParameterTo4D(SliceParameter *param); - -void DoSlice(const void *input, void *output, SliceParameter *param, int thread_id, int data_size); -void DoSliceNoParallel(const void *input, void *output, SliceParameter *param, int data_size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_BASE_SLICE_BASE_H_ diff --git a/mindspore/lite/nnacl/base/split_base.h b/mindspore/lite/nnacl/base/split_base.h deleted file mode 100644 index 3114e83f6c..0000000000 --- a/mindspore/lite/nnacl/base/split_base.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_NNACL_SPLIT_BASE_H_ -#define MINDSPORE_LITE_NNACL_NNACL_SPLIT_BASE_H_ - -#include "nnacl/op_base.h" -#include "nnacl/split_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int DoSplit(void *in_data, void **out_data, const int *input_shape, int offset, int num_unit, - SplitParameter *split_param, int data_size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_NNACL_SPLIT_BASE_H_ diff --git a/mindspore/lite/nnacl/base/stack_base.h b/mindspore/lite/nnacl/base/stack_base.h deleted file mode 100644 index 0ccfa7a5e3..0000000000 --- a/mindspore/lite/nnacl/base/stack_base.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_STACK_H_ -#define MINDSPORE_LITE_NNACL_STACK_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/stack_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Stack(char **inputs, char *output, size_t input_num, size_t copy_size, size_t outter_size); -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_STACK_H_ diff --git a/mindspore/lite/nnacl/base/tile_base.h b/mindspore/lite/nnacl/base/tile_base.h deleted file mode 100644 index 7a499a3aa7..0000000000 --- a/mindspore/lite/nnacl/base/tile_base.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_BASE_TILE_H_ -#define MINDSPORE_LITE_NNACL_BASE_TILE_H_ - -#include "nnacl/op_base.h" - -typedef struct TileParameter { - // primitive parameter - OpParameter op_parameter_; - int multiples_[5]; - int dims_[5]; - size_t dims_size_; - size_t multiples_size_; - - // shape correlative - int in_shape_[5]; - int out_shape_[5]; - int in_strides_[5]; - int out_strides_[5]; - - // other parameter - int in_dim_; - size_t data_size_; - size_t fast_outer_size_; - size_t fast_stride_; - size_t fast_multiple_; -} TileParameter; - -#ifdef __cplusplus -extern "C" { -#endif -void Tile(void *input_data, void *output_data, TileParameter *parameter); -void TileSimple(void *input_data, void *output_data, size_t begin, size_t end, TileParameter *parameter); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_BASE_TILE_H_ diff --git a/mindspore/lite/nnacl/base/unstack_base.h b/mindspore/lite/nnacl/base/unstack_base.h deleted file mode 100644 index ffc4637a99..0000000000 --- a/mindspore/lite/nnacl/base/unstack_base.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_UNSTACK_H_ -#define MINDSPORE_LITE_NNACL_UNSTACK_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/unstack_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Unstack(const void *input, void **output, UnstackParameter *para, int data_size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_UNSTACK_H_ diff --git a/mindspore/lite/nnacl/base/zeroslike_base.h b/mindspore/lite/nnacl/base/zeroslike_base.h deleted file mode 100644 index a778b9ae95..0000000000 --- a/mindspore/lite/nnacl/base/zeroslike_base.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ZEROSLIKE_BASE_H_ -#define MINDSPORE_LITE_NNACL_ZEROSLIKE_BASE_H_ - -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -static inline void ApproximateZerosLike(void *output, int number, int data_size) { - memset(output, 0.0, number * data_size); - return; -} - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_ZEROSLIKE_BASE_H_ diff --git a/mindspore/lite/nnacl/batch_to_space.h b/mindspore/lite/nnacl/batch_to_space.h deleted file mode 100644 index 7a8a76003b..0000000000 --- a/mindspore/lite/nnacl/batch_to_space.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_H_ -#define MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_H_ - -#include -#include "nnacl/op_base.h" - -#define BATCH_TO_SPACE_BLOCK_SHAPE_SIZE 2 - -typedef struct BatchToSpaceParameter { - OpParameter op_parameter_; - int32_t block_shape_[BATCH_TO_SPACE_BLOCK_SHAPE_SIZE]; - int32_t crops_[COMM_SHAPE_SIZE]; - bool no_crop_; -} BatchToSpaceParameter; - -#endif // MINDSPORE_LITE_NNACL_FP32_BATCH_TO_SPACE_H_ diff --git a/mindspore/lite/nnacl/batchnorm_parameter.h b/mindspore/lite/nnacl/batchnorm_parameter.h deleted file mode 100644 index 8e460cf438..0000000000 --- a/mindspore/lite/nnacl/batchnorm_parameter.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_BATCHNORM_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_BATCHNORM_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct BatchNormParameter { - OpParameter op_parameter_; - float epsilon_; - float momentum_; - int unit_; - int units_; - int channel_; - bool fused_; -} BatchNormParameter; - -#endif // MINDSPORE_LITE_NNACL_BATCHNORM_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/broadcast_to_parameter.h b/mindspore/lite/nnacl/broadcast_to_parameter.h deleted file mode 100644 index dd9823729b..0000000000 --- a/mindspore/lite/nnacl/broadcast_to_parameter.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct BroadcastToParameter { - OpParameter op_parameter_; - int shape_[COMM_SHAPE_SIZE]; - size_t shape_size_; -} BroadcastToParameter; - -typedef struct BroadcastShapeInfo { - int input_shape_[COMM_SHAPE_SIZE]; - int input_shape_size_; - int output_shape_[COMM_SHAPE_SIZE]; - int output_shape_size_; -} BroadcastShapeInfo; - -#endif // MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/cast_parameter.h b/mindspore/lite/nnacl/cast_parameter.h deleted file mode 100644 index 4a56bc3a98..0000000000 --- a/mindspore/lite/nnacl/cast_parameter.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CAST_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_CAST_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct CastParameter { - OpParameter op_parameter_; - int dst_type_; - int src_type_; -} CastParameter; - -#endif // MINDSPORE_LITE_NNACL_CAST_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/common_func.h b/mindspore/lite/nnacl/common_func.h deleted file mode 100644 index 0ab52e62b7..0000000000 --- a/mindspore/lite/nnacl/common_func.h +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_COMMON_FUNC_H_ -#define MINDSPORE_LITE_NNACL_COMMON_FUNC_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/conv_parameter.h" -#include "nnacl/nnacl_common.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int8_t MinInt8(int8_t a, int8_t b); -int8_t MaxInt8(int8_t a, int8_t b); -void ReluFp32(float *data, float *dst, int ele_num); -void Relu6Fp32(float *data, float *dst, int ele_num); -#ifdef ENABLE_AVX -#ifdef WIN32 -void ReluFp32C8(float *data, float *dst, int ele_num); -void Relu6Fp32C8(float *data, float *dst, int ele_num); -#endif -#endif -int offset(const int *shape, const int dim0, const int dim1, const int dim2, const int dim3); -int offsetComm(const int *shape, const int dim0, const int dim1, const int dim2); -int offset4d(const int *shape, const int *dims); - -static inline bool isAddOverflow(int32_t x, int32_t y) { - int32_t sum = x + y; - return (x > 0 && y > 0 && sum < 0) || (x < 0 && y < 0 && sum > 0); -} - -static inline bool isMulOverflow(int32_t x, int32_t y) { - int32_t p = x * y; - return (x != 0) && (p / x != y); -} - -static inline int GetStride(int *strides, const int *shape, int length) { - if (length <= 0) { - return 1; - } - int stride = 1; - for (int i = length - 1; i >= 0; --i) { - strides[i] = stride; - stride *= shape[i]; - } - return stride; -} - -#ifdef ENABLE_ARM64 -void BiasAdd(const float *bias, float *data, size_t oc4, size_t plan_size); -void BiasAddRelu6(const float *bias, float *data, size_t oc4, size_t plan_size); -void BiasAddRelu(const float *bias, float *data, size_t oc4, size_t plan_size); -void Relu6(float *data, size_t element4); -void Relu(float *data, size_t element4); -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* MINDSPORE_LITE_NNACL_COMMON_FUNC_H_ */ diff --git a/mindspore/lite/nnacl/concat_parameter.h b/mindspore/lite/nnacl/concat_parameter.h deleted file mode 100644 index 35386464e5..0000000000 --- a/mindspore/lite/nnacl/concat_parameter.h +++ /dev/null @@ -1,35 
+0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_CONCAT_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_CONCAT_PARAMETER_H_ - -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" - -typedef struct ConcatParameter { - OpParameter op_parameter_; - ConcatQuantArg quant_arg_; - int axis_; - int thread_count_; - int input_num_; - int **input_shapes_; - int *output_shapes_; - int64_t after_axis_size; - int64_t count_unit_; -} ConcatParameter; - -#endif // MINDSPORE_LITE_NNACL_CONCAT_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/constant_of_shape_parameter.h b/mindspore/lite/nnacl/constant_of_shape_parameter.h deleted file mode 100644 index 813fdf488d..0000000000 --- a/mindspore/lite/nnacl/constant_of_shape_parameter.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct ConstantOfShapeParameter { - OpParameter op_parameter_; - union value_ { - float f32_value_; - int32_t int32_value_; - } value_; - int data_type_; - int element_size_; -} ConstantOfShapeParameter; - -#endif // MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/conv_parameter.h b/mindspore/lite/nnacl/conv_parameter.h deleted file mode 100644 index 5fa4e215d5..0000000000 --- a/mindspore/lite/nnacl/conv_parameter.h +++ /dev/null @@ -1,131 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_CONV_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_CONV_PARAMETER_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" - -typedef struct ConvParameter { - OpParameter op_parameter_; - ConvQuantArg conv_quant_arg_; - int kernel_h_; - int kernel_w_; - int stride_h_; - int stride_w_; - int dilation_h_; - int dilation_w_; - int pad_u_; - int pad_d_; - int pad_l_; - int pad_r_; - int group_; - int tile_num_; - int input_batch_; - int input_h_; - int input_w_; - int input_channel_; - int output_batch_; - int output_h_; - int output_w_; - int output_channel_; - int thread_num_; - int input_unit_; - int output_unit_; - PadMode pad_mode_; - ActType act_type_; - int channel_multiplie_; - int output_padding_w_; - int output_padding_h_; -} ConvParameter; - -typedef struct SlidingWindowParam { - int left_; - int right_; - int top_; - int bottom_; - int c_block_; - int block_channel_; - int ic4_channel_; - int out_step_; - int out_h_step_; - int in_step_; - int in_h_step_; - int in_sh_step_; // stride H - int in_sw_step_; // stride W - int in_kh_step_; // kernel H - int in_kw_step_; // kernel W - int kernel_step_; -} SlidingWindowParam; - -#define OUPUT_UNIT 2 -#define DECONV_WINOGRAD_DEFAULT_UNIT 3 -#define DECONV_WINOGRAD_DEFAULT_TILE 8 -#define DECONV_WINOGRAD_BUFFER_COUNT 8 -typedef struct DeConvWg { - void *b_buffer_; - void *AT_; - void *BT_; - - int kh_; - int kw_; - - int k_; - int i_; - int o_; -} DeConvWg; - -typedef struct DeConvWgABuffer { - bool buf_init_; - void *middle_buffer_; - void *dest_buffer_; -} DeConvWgABuffer; - -typedef struct DeConvComputeUnit { - void *weight_; - void *tmp_buffer_; - int w_start_; - int h_start_; - int w_size_; - int h_size_; - bool use_winograd_; - DeConvWg winograd_; -} DeConvComputeUnit; - -typedef struct DeConvParam { - DeConvComputeUnit *compute_units_; - int compute_size_; - DeConvWgABuffer a_buffer_[DECONV_WINOGRAD_BUFFER_COUNT]; - int 
input_plane_; - int output_plane_; - int kernel_plane_; - int ic_div4_; - int oc_div4_; - int ic_up4_; - int oc_up4_; - int thread_num_; - int in_tile_count_; - int in_tile_h_count_; - int in_tile_w_count_; - int out_tile_h_; - int out_tile_w_; -} DeConvParam; - -#endif // MINDSPORE_LITE_NNACL_CONV_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/crop_parameter.h b/mindspore/lite/nnacl/crop_parameter.h deleted file mode 100644 index 6730a0be6e..0000000000 --- a/mindspore/lite/nnacl/crop_parameter.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_CROP_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_CROP_PARAMETER_H_ - -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" - -typedef struct CropParameter { - OpParameter op_parameter_; - CropQuantArg quant_arg; - int thread_count_; - int offset_size_; - int64_t offset_[COMM_SHAPE_SIZE]; - int64_t in_offset_[COMM_SHAPE_SIZE]; - int64_t axis_; - int *in_shape_; - int *out_shape_; - int input_dim_; -} CropParameter; - -#endif // MINDSPORE_LITE_NNACL_CROP_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/depth_to_space_parameter.h b/mindspore/lite/nnacl/depth_to_space_parameter.h deleted file mode 100644 index 0c9dfe9ba4..0000000000 --- a/mindspore/lite/nnacl/depth_to_space_parameter.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_PARAMETER_H_ -#include "nnacl/op_base.h" - -typedef struct DepthToSpaceParameter { - OpParameter op_parameter_; - // primitive parameter - int32_t block_size_; - // shape correlative - int32_t in_stride_dim0_; - int32_t in_stride_dim1_; - int32_t in_stride_dim2_; - int32_t out_stride_dim0_; - int32_t out_stride_dim1_; - int32_t out_stride_dim2_; - // other parameter - uint8_t data_type_size_; -} DepthToSpaceParameter; - -#endif // MINDSPORE_LITE_NNACL_DEPTH_TO_SPACE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/detection_post_process_parameter.h b/mindspore/lite/nnacl/detection_post_process_parameter.h deleted file mode 100644 index f7bc270a6c..0000000000 --- a/mindspore/lite/nnacl/detection_post_process_parameter.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_PARAMETER_H_ -#include "nnacl/op_base.h" - -typedef struct DetectionPostProcessParameter { - OpParameter op_parameter_; - float h_scale_; - float w_scale_; - float x_scale_; - float y_scale_; - float nms_iou_threshold_; - float nms_score_threshold_; - int64_t max_detections_; - int64_t detections_per_class_; - int64_t max_classes_per_detection_; - int64_t num_classes_; - bool use_regular_nms_; - bool out_quantized_; - - float *anchors_; - - void *decoded_boxes_; - void *nms_candidate_; - void *indexes_; - void *scores_; - void *all_class_indexes_; - void *all_class_scores_; - void *single_class_indexes_; - void *selected_; -} DetectionPostProcessParameter; - -#endif // MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/errorcode.h b/mindspore/lite/nnacl/errorcode.h deleted file mode 100644 index 18a50290cc..0000000000 --- a/mindspore/lite/nnacl/errorcode.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_ERRORCODE_H_ -#define MINDSPORE_LITE_NNACL_ERRORCODE_H_ - -typedef enum ErrorCodeCommonEnum { - NNACL_OK = 0, - NNACL_ERR = 1, - NNACL_NULL_PTR, - NNACL_PARAM_INVALID, - NNACL_INFER_INVALID, - NNACL_INPUT_TENSOR_ERROR, - NNACL_COMMON_END = 9999 -} ErrorCodeCommonEnum; - -typedef enum ErrorCodeFp32OpEnum { - NNACL_ERRCODE_OP_FP32_START = 10000, - NNACL_ERRCODE_STRASSEN_RECURSION_MALLOC, - NNACL_ERRCODE_REVERSE_MALLOC, - NNACL_ERRCODE_SQRT_NEGATIVE, - NNACL_ERRCODE_RSQRT_NEGATIVE, - NNACL_ERRCODE_RSQRT_NEGATIVE_OR_ZERO, - NNACL_ERRCODE_LOG_NEGATIVE_OR_ZERO, - NNACL_ERRCODE_DIVISOR_ZERO, - NNACL_ERRCODE_INDEX_OUT_OF_RANGE, - NNACL_ERRCODE_WINOGRAD_GENERATOR_ERROR, - NNACL_ERRCODE_OP_FP32_END = 19999 -} ErrorCodeFp32OpEnum; - -typedef enum ErrorCodeFp16OpEnum { - NNACL_ERRCODE_OP_FP16_START = 20000, - NNACL_ERRCODE_OP_FP16_WINOGRAD_GENERATOR, - NNACL_ERRCODE_OP_FP16_END = 29999 -} ErrorCodeFp16OpEnum; - -typedef enum ErrorCodeUint8OpEnum { - NNACL_ERRCODE_OP_UINT8_START = 30000, - NNACL_ERRCODE_OP_UINT8_END = 39999 -} ErrorCodeUint8OpEnum; - -typedef enum ErrorCodeInt8OpEnum { - NNACL_ERRCODE_OP_INT8_START = 40000, - NNACL_ERRCODE_ADD_OVERFLOW, - NNACL_ERRCODE_MUL_OVERFLOW, - NNACL_ERRCODE_OP_INT8_END = 49999 -} ErrorCodeInt8OpEnums; - -#endif // MINDSPORE_LITE_NNACL_ERRORCODE_H_ diff --git a/mindspore/lite/nnacl/fill_parameter.h b/mindspore/lite/nnacl/fill_parameter.h deleted file mode 100644 index dbde073817..0000000000 --- a/mindspore/lite/nnacl/fill_parameter.h +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FILL_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_FILL_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct FillParameter { - // Primitive parameter - OpParameter op_parameter_; - int dims_[COMM_SHAPE_SIZE]; - int num_dims_; -} FillParameter; - -#endif // MINDSPORE_LITE_NNACL_FILL_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/fp16/activation_fp16.h b/mindspore/lite/nnacl/fp16/activation_fp16.h deleted file mode 100644 index 6463490490..0000000000 --- a/mindspore/lite/nnacl/fp16/activation_fp16.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP16_ACTIVATION_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_ACTIVATION_FP16_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include -#include "nnacl/op_base.h" -#include "nnacl/int8/fixed_point.h" - -#ifdef __cplusplus -extern "C" { -#endif -int ReluFp16(const float16_t *src, float16_t *dst, int ele_num); -int Relu6Fp16(const float16_t *data, float16_t *dst, int ele_num); -int LReluFp16(const float16_t *src, float16_t *dst, int ele_num, float16_t alpha); -int SigmoidFp16(const float16_t *src, float16_t *dst, int ele_num); -int TanhFp16(const float16_t *src, float16_t *dst, int ele_num); -int HSwishFp16(const float16_t *src, float16_t *dst, int ele_num); -int SwishFp16(const float16_t *src, float16_t *dst, int ele_num); -int HardTanhFp16(const float16_t *src, int length, float16_t *dst, float min_val, float max_val); -int GeluFp16(const float16_t *src, int length, float16_t *dst, bool approximate); -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP16_ACTIVATION_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/arg_min_max_fp16.h b/mindspore/lite/nnacl/fp16/arg_min_max_fp16.h deleted file mode 100644 index e969b1e627..0000000000 --- a/mindspore/lite/nnacl/fp16/arg_min_max_fp16.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP16_ARG_MIN_MAX_H_ -#define MINDSPORE_LITE_NNACL_FP16_ARG_MIN_MAX_H_ - -#include -#include "nnacl/arg_min_max_parameter.h" -#include "nnacl/nnacl_common.h" - -#ifdef __cplusplus -extern "C" { -#endif -void ArgMinMaxFp16(const float16_t *input, void *output, float16_t *output_value, const int *in_shape, - const ArgMinMaxParameter *param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_ARG_MIN_MAX_H_ diff --git a/mindspore/lite/nnacl/fp16/arithmetic_fp16.h b/mindspore/lite/nnacl/fp16/arithmetic_fp16.h deleted file mode 100644 index 10c34073e8..0000000000 --- a/mindspore/lite/nnacl/fp16/arithmetic_fp16.h +++ /dev/null @@ -1,126 +0,0 @@ -/** - * Copyright 2020-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP16_ARITHMETIC_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_ARITHMETIC_FP16_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/base/arithmetic_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void TileOneDimensionFp16(const float16_t *inData, float16_t *outData, int dim, size_t ndim, const int *inShape, - const int *inStrides, const int *outStrides, const int *multiple); -void TileDimensionsFp16(const float16_t *data0, const float16_t *data1, float16_t *tile_data0, float16_t *tile_data1, - ArithmeticParameter *param); - -int ElementOptMulFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptMulReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptMulRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptAddFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptAddReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptAddRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptSubFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptSubReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptSubRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptDivFp16(const float16_t *input0, const float16_t 
*input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptDivReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptDivRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptFloorModFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptFloorDivFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptLogicalAndFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptLogicalOrFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptSquaredDifferenceFp16(const float16_t *input0, const float16_t *input1, float16_t *output, - int element_size, ArithmeticParameter *param); -int ElementOptMaximumFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptMinimumFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptNotEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptLessFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptLessEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, - ArithmeticParameter *param); -int 
ElementOptGreaterFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, - ArithmeticParameter *param); -int ElementOptGreaterEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size, - ArithmeticParameter *param); - -int ElementMulFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementMulReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementMulRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); - -int ElementAddFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementAddReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementAddRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int BroadcastAddFp16(const float16_t *in0, const float16_t *in1, float16_t *tile_in0, float16_t *tile_in1, - float16_t *out, int size, ArithmeticParameter *param); - -int ElementSubFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementSubReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementSubRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); - -int ElementDivFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementDivReluFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementDivRelu6Fp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); - -int ElementFloorModFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementFloorDivFp16(const float16_t *input0, const float16_t *input1, float16_t *output, 
int element_size); - -int ElementLogicalAndFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementLogicalOrFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); - -int ElementSquaredDifferenceFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); - -int ElementMaximumFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); -int ElementMinimumFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); - -int ElementNotEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); -int ElementEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); -int ElementLessFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); -int ElementLessEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); -int ElementGreaterFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); -int ElementGreaterEqualFp16(const float16_t *input0, const float16_t *input1, uint8_t *output, int element_size); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_ARITHMETIC_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/arithmetic_self_fp16.h b/mindspore/lite/nnacl/fp16/arithmetic_self_fp16.h deleted file mode 100644 index 017f08f9d7..0000000000 --- a/mindspore/lite/nnacl/fp16/arithmetic_self_fp16.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP16_ARITHMETIC_SELF_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_ARITHMETIC_SELF_FP16_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif -int ElementAbsFp16(float16_t *input, float16_t *output, int element_size); - -int ElementCosFp16(float16_t *input, float16_t *output, int element_size); - -int ElementLogFp16(float16_t *input, float16_t *output, int element_size); - -int ElementSquareFp16(float16_t *input, float16_t *output, int element_size); - -int ElementSqrtFp16(float16_t *input, float16_t *output, int element_size); - -int ElementRsqrtFp16(float16_t *input, float16_t *output, int element_size); - -int ElementSinFp16(float16_t *input, float16_t *output, int element_size); - -int ElementLogicalNotFp16(float16_t *input, float16_t *output, int element_size); - -int ElementRoundFp16(float16_t *input, float16_t *output, int element_size); - -int ElementFloorFp16(float16_t *input, float16_t *output, int element_size); - -int ElementCeilFp16(float16_t *input, float16_t *output, int number); - -int ElementNegativeFp16(float16_t *input, float16_t *output, int element_size); - -int ElementReciprocalFp16(float16_t *input, float16_t *output, int element_size); - -int ElementErfFp16(float16_t *input, float16_t *output, int element_size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_ARITHMETIC_SELF_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/cast_fp16.h b/mindspore/lite/nnacl/fp16/cast_fp16.h deleted 
file mode 100644 index 7493196fd1..0000000000 --- a/mindspore/lite/nnacl/fp16/cast_fp16.h +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_CAST_FP16_H_ -#define MINDSPORE_LITE_NNACL_CAST_FP16_H_ - -#include -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -inline void BoolToFloat16(const bool *input, float16_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (float16_t)input[i]; - } -} - -inline void Uint8ToFloat16(const uint8_t *input, float16_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (float16_t)input[i]; - } -} - -inline void Float16ToInt32(const float16_t *input, int32_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (int32_t)input[i]; - } -} - -inline void Float16ToInt64(const float16_t *input, int64_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (int64_t)input[i]; - } -} - -inline void Float32ToFloat16(const float *input, float16_t *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (float16_t)input[i]; - } -} - -inline void Float16ToFloat32(const float16_t *input, float *output, int number) { - for (int i = 0; i < number; ++i) { - output[i] = (float)input[i]; - } -} - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CAST_FP16_H_ diff --git 
a/mindspore/lite/nnacl/fp16/common_func_fp16.h b/mindspore/lite/nnacl/fp16/common_func_fp16.h deleted file mode 100644 index e559951bf7..0000000000 --- a/mindspore/lite/nnacl/fp16/common_func_fp16.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP16_COMMON_FUNC_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_COMMON_FUNC_FP16_H_ - -#include -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* deconv common */ -void PostConvFuncFp16C8(const float16_t *c8_out_ptr, float16_t *out_ptr, const float16_t *bias_ptr, - size_t output_channel, size_t plane_size, size_t stride, ActType act_type); -void PostFuncBiasReluC8Fp16(float16_t *dst, const float16_t *src, const float16_t *bias, size_t oc8div, size_t oc8mod, - size_t plane_size, size_t stride, size_t relu_type); - -/* deconv winograd */ -void PostConvFuncFp16C4(const float16_t *c4_out, float16_t *nhwc_out, const float16_t *bias, size_t output_channel, - size_t plane_size, size_t plane_stride, ActType act_type); -void PostFuncBiasReluC4Fp16(float16_t *dst, const float16_t *src, const float16_t *bias, size_t oc4div, size_t oc4mod, - size_t plane_size, size_t plane_stride, size_t relu_type); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP16_COMMON_FUNC_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/constant_of_shape_fp16.h 
b/mindspore/lite/nnacl/fp16/constant_of_shape_fp16.h deleted file mode 100644 index a414b50b40..0000000000 --- a/mindspore/lite/nnacl/fp16/constant_of_shape_fp16.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP16_CONSTANT_OF_SHAPE_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_CONSTANT_OF_SHAPE_FP16_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/constant_of_shape_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -#ifdef __cplusplus -#ifdef ENABLE_NEON -inline int ConstantOfShapeFp16(float16_t *output, int start, int end, float16_t value) { - for (int i = start; i < end; i++) { - output[i] = value; - } - return NNACL_OK; -} -#endif -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_CONSTANT_OF_SHAPE_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/conv_depthwise_fp16.h b/mindspore/lite/nnacl/fp16/conv_depthwise_fp16.h deleted file mode 100644 index 75291d79fd..0000000000 --- a/mindspore/lite/nnacl/fp16/conv_depthwise_fp16.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_CONV_DEPTHWISE_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_CONV_DEPTHWISE_FP16_H_ - -#include "nnacl/conv_parameter.h" -#include "nnacl/fp32/conv_depthwise_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif -#ifdef ENABLE_ARM64 -void ConvDwFp16Row(float16_t *output_ptr, const float16_t *input_ptr, const float16_t *filter_ptr, size_t num_pixels, - size_t input_channel, size_t input_step); -void ConvDwFp16Border(float16_t *dst, const float16_t *src, const float16_t *weight, const float16_t *bias, - size_t height, size_t width, size_t in_kh_step, size_t in_kw_step, size_t kernel_w, size_t relu, - size_t relu6); -void ConvDwFp16Center(float16_t *dst, const float16_t *src, const float16_t *weight, const float16_t *bias, - size_t height, size_t width, size_t kernel_h, size_t kernel_w, size_t out_h_step, - size_t block_channel, size_t in_sh_step, size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, - size_t relu, size_t relu6); -void DeconvDwFp16Border(float16_t *dst, const float16_t *src, const float16_t *weight, size_t height, size_t width, - size_t in_kh_step, size_t in_kw_step, size_t kernel_w); -void DeconvDwFp16Center(float16_t *dst, const float16_t *src, const float16_t *weight, size_t height, size_t width, - size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, - size_t in_sw_step, size_t in_kh_step, size_t in_kw_step); -#endif - -void ConvDwFp16(float16_t *output_data, const float16_t *input_data, const float16_t *weight_data, - const float16_t *bias_data, const 
ConvParameter *conv_param, int task_id); - -void ConvDwC8Fp16(float16_t *output_data, const float16_t *input_data, const float16_t *weight_data, - const float16_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding, - int task_id); - -void DeconvDwC8Fp16(float16_t *output_data, const float16_t *input_data, const float16_t *weight_data, - const float16_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding, - int task_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_CONV_DEPTHWISE_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/conv_fp16.h b/mindspore/lite/nnacl/fp16/conv_fp16.h deleted file mode 100644 index b38b2854ea..0000000000 --- a/mindspore/lite/nnacl/fp16/conv_fp16.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP16_CONV_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_CONV_FP16_H_ - -#include -#include "nnacl/conv_parameter.h" -#include "nnacl/fp16/winograd_utils_fp16.h" -#include "nnacl/fp16/winograd_transform_fp16.h" - -typedef float16_t *TmpBufferAddressFp16; -typedef float16_t *MatricesFp16; - -#ifndef ENABLE_NEON -void IndirectGemmFp16_16x8(float16_t *output, float16_t *input, float16_t *weight, float16_t *bias, size_t step, - size_t ic4, size_t oc8, size_t offset, size_t mode, size_t writeC8, size_t relu, - size_t relu6); - -void IndirectGemmFp16_16x8_common(float16_t *output, float16_t *input, float16_t *weight, float16_t *bias, size_t step, - size_t ic4, size_t oc8, size_t offset, size_t relu, size_t relu6); - -void IndirectGemmFp16_16x8_c8(float16_t *output, float16_t *input, float16_t *weight, float16_t *bias, size_t step, - size_t ic4, size_t oc8, size_t offset, size_t mode, size_t writeC8, size_t relu, - size_t relu6); -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -// fp16 convolution common (im2col+gemm) -void ConvFp16(float16_t *input_data, float16_t *packed_input, float16_t *packed_weight, float16_t *bias_data, - float16_t *col_major_input, float16_t *output_data, int task_id, ConvParameter *conv_param); - -// fp16 convolution winograd -void ConvWinogardFp16(float16_t *input_data, float16_t *trans_weight, const float16_t *bias_data, - float16_t *output_data, TmpBufferAddressFp16 *buffer_list, int task_id, ConvParameter *conv_param, - InputTransFp16Func in_func, OutputTransFp16Func out_func); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_CONV_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/crop_fp16.h b/mindspore/lite/nnacl/fp16/crop_fp16.h deleted file mode 100644 index 18530b674b..0000000000 --- a/mindspore/lite/nnacl/fp16/crop_fp16.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * 
you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_CROP_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_CROP_FP16_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/crop_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Fp16Crop(const float16_t *input, float16_t *output, int task_id, CropParameter *para); -void Fp16Crop1D(const float16_t *input, float16_t *output, int task_id, CropParameter *para); -void Fp16Crop2D(const float16_t *input, float16_t *output, int task_id, CropParameter *para); -void Fp16Crop3D(const float16_t *input, float16_t *output, int task_id, CropParameter *para); -void Fp16Crop4D(const float16_t *input, float16_t *output, int task_id, CropParameter *para); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_CROP_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/deconv_fp16.h b/mindspore/lite/nnacl/fp16/deconv_fp16.h deleted file mode 100644 index f095930543..0000000000 --- a/mindspore/lite/nnacl/fp16/deconv_fp16.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_DECONV_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_DECONV_FP16_H_ - -#include -#include -#include "nnacl/conv_parameter.h" -#include "nnacl/errorcode.h" -#include "nnacl/fp16/common_func_fp16.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int DeConvPostFp16(const float16_t *src, float16_t *tmp, const float16_t *bias, float16_t *dst, int output_channel, - ConvParameter *conv_param); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP16_DECONV_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/deconv_winograd_fp16.h b/mindspore/lite/nnacl/fp16/deconv_winograd_fp16.h deleted file mode 100644 index c4a4f9283e..0000000000 --- a/mindspore/lite/nnacl/fp16/deconv_winograd_fp16.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_DECONV_WINOGRAD_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_DECONV_WINOGRAD_FP16_H_ - -#include "nnacl/fp16/winograd_transform_fp16.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int PackDeConvWgDataFp16(float16_t *nhwc_weight, DeConvComputeUnit *unit, ConvParameter *conv_param, - DeConvParam *deconv_param); - -void DeconvWgFp16(float16_t *nhwc_input_, float16_t *tile_in, float16_t *tile_out, int start_index, int calculate_count, - ConvParameter *conv_param, DeConvParam *deconv_param, int task_id); - -void DeconvWgPostFp16(float16_t *tile_out, float16_t *nc4hw4_output, ConvParameter *conv_param, - DeConvParam *deconv_param, int calculate_count, int tile_index); - -void TiledC4MatmulFp16(float16_t *dst, const float16_t *src, const float16_t *weight, size_t ic4, size_t cal_num, - size_t oc4); - -void WinogradTransLeftFp16(const float16_t *S, const float16_t *B, float16_t *M, size_t w, size_t h, size_t k, - size_t length); - -void WinogradTransRightFp16(const float16_t *S, const float16_t *B, float16_t *M, size_t w, size_t h, size_t k, - size_t length); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_DECONV_WINOGRAD_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/exp_fp16.h b/mindspore/lite/nnacl/fp16/exp_fp16.h deleted file mode 100644 index f9db27c09e..0000000000 --- a/mindspore/lite/nnacl/fp16/exp_fp16.h +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_EXP_H_ -#define MINDSPORE_LITE_NNACL_FP16_EXP_H_ - -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -void ExpFp16(const float16_t *src, float16_t *dst, int num); - -#if defined(ENABLE_ARM64) -static inline float32x4_t exp_fp32(float32x4_t input) { - static float32x4_t param[] = {{0.693147f, 0.693147f, 0.693147f, 0.693147f}, - {1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120}, - {1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24}, - {1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6}, - {0.5f, 0.5f, 0.5f, 0.5f}, - {1.0f, 1.0f, 1.0f, 1.0f}}; - int32x4_t integer = vcvtq_s32_f32(input / param[0]); - float32x4_t decimal = input - vcvtq_f32_s32(integer) * param[0]; - int32x4_t int_exp = vshlq_s32((integer + vmovq_n_s32(127)), vmovq_n_s32(23)); - float32x4_t decimal_exp = - param[5] + - decimal * (param[5] + decimal * (param[4] + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); - decimal_exp = decimal_exp * vld1q_f32((float *)(&int_exp)); - return decimal_exp; -} - -static inline void simd_exp_fp16(float16x8_t input, float16_t *dst) { - static float16x8_t maxv = {88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f}; - static float16x8_t minv = {-88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f}; - - input = vmaxq_f16(minv, vminq_f16(input, maxv)); - float32x4_t input_low = vcvt_f32_f16(vget_low_f16(input)); - float32x4_t input_high = vcvt_high_f32_f16(input); - vst1q_f16(dst, vcombine_f16(vcvt_f16_f32(exp_fp32(input_low)), vcvt_f16_f32(exp_fp32(input_high)))); -} -#endif - -static inline void single_exp_fp16(float16_t src, float16_t *dst) { - static float param[] = {0.693147f, 1.0f / 120, 1.0f / 24, 1.0f / 6, 1.0f / 2, 1.0f}; - src = MSMAX(-88.0f, MSMIN(88.0f, src)); - int integer = (float)src / param[0]; - float decimal = (float)src - integer * param[0]; - int int_exp = (integer + 127) 
<< 23; - const float decimal_exp = - 1.0f + decimal * (1.0f + decimal * (0.5f + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); - *dst = (float16_t)(*((float *)&int_exp) * decimal_exp); -} -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_EXP_H_ diff --git a/mindspore/lite/nnacl/fp16/gru_fp16.h b/mindspore/lite/nnacl/fp16/gru_fp16.h deleted file mode 100644 index 4f4485748d..0000000000 --- a/mindspore/lite/nnacl/fp16/gru_fp16.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP16_GRU_H_ -#define MINDSPORE_LITE_NNACL_FP16_GRU_H_ -#include "nnacl/gru_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void GruFp16(float16_t *output, const float16_t *input, const float16_t *weight_g, const float16_t *weight_r, - const float16_t *input_bias, const float16_t *state_bias, float16_t *hidden_state, float16_t *buffer[4], - int check_seq_len, const GruParameter *gru_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_GRU_H_ diff --git a/mindspore/lite/nnacl/fp16/instance_norm_fp16.h b/mindspore/lite/nnacl/fp16/instance_norm_fp16.h deleted file mode 100644 index e6fc99331f..0000000000 --- a/mindspore/lite/nnacl/fp16/instance_norm_fp16.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP16_INSTANCE_NORM_H_ -#define MINDSPORE_LITE_NNACL_FP16_INSTANCE_NORM_H_ - -#include "nnacl/instance_norm_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int InstanceNormFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *gamma_data, - const float16_t *beta_data, const InstanceNormParameter *param, size_t task_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_INSTANCE_NORM_H_ diff --git a/mindspore/lite/nnacl/fp16/lstm_fp16.h b/mindspore/lite/nnacl/fp16/lstm_fp16.h deleted file mode 100644 index fff951d8ad..0000000000 --- a/mindspore/lite/nnacl/fp16/lstm_fp16.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_LSTM_H_ -#define MINDSPORE_LITE_NNACL_FP16_LSTM_H_ - -#include "nnacl/lstm_parameter.h" -#ifdef __cplusplus -extern "C" { -#endif -void PackLstmWeightFp32ToFp16(float16_t *dst, const float *src, int batch, int deep, int col, int col_align); - -void PackLstmWeightFp16(float16_t *dst, const float16_t *src, int batch, int deep, int col, int col_align); - -void PackLstmBiasFp32ToFp16(float16_t *dst, const float *src, int batch, int col, int col_align, bool is_bidirectional); - -void PackLstmBiasFp16(float16_t *dst, const float16_t *src, int batch, int col, int col_align, bool is_bidirectional); - -void LstmMatMulFp16(float16_t *c, const float16_t *a, const float16_t *b, const float16_t *bias, int row, int deep, - int col, bool is_vec); - -void MatMulAccFp16(float16_t *output, const float16_t *input, const float16_t *weight, int rows, int cols, - int inner_size); - -void ElementMulAccFp16(const float16_t *input0, const float16_t *input1, float16_t *output, int element_size); - -int ElementOptMulAccFp16(const float16_t *input0, const float16_t input1, float16_t *output, const int element_size); - -void LstmFp16(float16_t *output, const float16_t *input, const float16_t *weight_i, const float16_t *weight_h, - const float16_t *input_bias, const float16_t *state_bias, float16_t *hidden_state, float16_t *cell_state, - float16_t *buffer[6], const LstmParameter *lstm_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_LSTM_H_ diff --git a/mindspore/lite/nnacl/fp16/matmul_fp16.h b/mindspore/lite/nnacl/fp16/matmul_fp16.h deleted file mode 100644 index e2e7a80cc2..0000000000 --- a/mindspore/lite/nnacl/fp16/matmul_fp16.h +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_MATMUL_H_ -#define MINDSPORE_LITE_NNACL_FP16_MATMUL_H_ - -#include -#include -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/errorcode.h" -#include "nnacl/matmul_parameter.h" -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -void MatMul16x8(const float16_t *a, const float16_t *b, float16_t *dst, const float16_t *bias, ActType act_type, - int deep, int row, int col, int stride, bool write_nhwc); - -void MatMulFp16(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, ActType act_type, - int depth, int row, int col, int stride, int out_type); - -void MatVecMulFp16(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, ActType act_type, - int depth, int col); - -void ColMajor2Row8MajorFp16(const void *src_ptr, float16_t *dst_ptr, size_t row, size_t col, bool src_float16); - -void RowMajor2Col16MajorFp16Opt(const float16_t *src_ptr, float16_t *dst_ptr, size_t row, size_t col); - -void MatmulFp16Neon64(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, int act_type, - size_t depth, size_t row, size_t col, size_t stride, bool write_nhwc); - -void MatmulFp16Neon64Opt(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, int act_type, - size_t depth, size_t row, size_t col, size_t stride, size_t write_nhwc); - -void MatVecMulFp16Neon64(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, int act_type, - int depth, int col); - -void RowMajor2Col16MajorFp16(const void 
*src, float16_t *dst, int row, int col, bool is_fp32_src); - -void RowMajor2Row16MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); - -void RowMajor2Row8MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); - -void RowMajor2Col8MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); - -void RowMajor2ColMajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_MATMUL_H_ diff --git a/mindspore/lite/nnacl/fp16/matrix_fp16.h b/mindspore/lite/nnacl/fp16/matrix_fp16.h deleted file mode 100644 index cfa956ab0a..0000000000 --- a/mindspore/lite/nnacl/fp16/matrix_fp16.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_MATRIX_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_MATRIX_FP16_H_ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif -void MatrixMultiplyFp16(const float16_t *matrix_a, const float16_t *matrix_b, float16_t *matrix_c, int m, int k, int n); - -void MatrixMultiplyVecFp16(const float16x8_t *matrix_a, const float16x8_t *matrix_b, float16x8_t *matrix_c, - const float16_t *bias, int m, int k, int n); -void MatrixMultiplyWinogradFp16(const float16_t *matix_a, const float16_t *matrix_b, float16_t *matrix_c, int m, int k, - int n, int in_channel); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_MATRIX_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/pack_fp16.h b/mindspore/lite/nnacl/fp16/pack_fp16.h deleted file mode 100644 index b49a35b479..0000000000 --- a/mindspore/lite/nnacl/fp16/pack_fp16.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_PACK_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_PACK_FP16_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/conv_parameter.h" -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Im2ColPackUnitFp16(float16_t *input_data, ConvParameter *conv_param, float16_t *packed_input, int real_cal_num, - int block_index); - -void PackWeightToC8Fp16(const float16_t *origin_weight_data, float16_t *packed_weight_data, ConvParameter *conv_param); - -void PackHWCToWHCFp16(const float16_t *src, float16_t *dst, int height, int width, int channel); - -void PackWeightToC4Fp16(const float16_t *origin_weight_data, float16_t *packed_weight_data, ConvParameter *conv_param); - -void PackNHWCToNC4HW4Fp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNCHWToNC4HW4Fp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNCHWToNHWCFp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNHWCToNCHWFp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNHWCToNHWC4Fp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNHWCToNHWC8Fp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNHWC4ToNHWCFp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNCHWToNHWC4Fp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNC4HW4ToNHWC4Fp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNC4HW4ToNHWCFp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNC4HW4ToNCHWFp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNC8HW8ToNHWCFp16(const void *src, void *dst, int batch, int plane, int channel); - -void PackNCHWFp32ToNC8HW8Fp16(float *src, float16_t *dst, int batch, int plane, int channel); - -void PackNCHWFp16ToNC8HW8Fp16(float16_t *src, float16_t 
*dst, int batch, int plane, int channel); - -void PackNHWCFp32ToNHWC8Fp16(float *src, float16_t *dst, int batch, int plane, int channel); - -void PackNHWCFp32ToC8HWN8Fp16(float *src, float16_t *dst, int batch, int plane, int channel); - -void PackNHWCFp16ToC8HWN8Fp16(float16_t *src, float16_t *dst, int batch, int plane, int channel); - -void PackNHWC8Fp16ToNHWCFp32(float16_t *src, float *dst, int batch, int plane, int channel); - -void PackNHWC8ToNHWCFp16(float16_t *src, float16_t *dst, int batch, int plane, int channel); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_PACK_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/pad_fp16.h b/mindspore/lite/nnacl/fp16/pad_fp16.h deleted file mode 100644 index 058f26c2f9..0000000000 --- a/mindspore/lite/nnacl/fp16/pad_fp16.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP16_PAD_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_PAD_FP16_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/fp32/pad_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif -void PadFp16(const float16_t *input_data, float16_t *output_data, const int *input_shape, const int *output_shape, - const int *paddings, const int tid, const int thread_num); -void MirrorPadFp16(const float16_t *input_data, float16_t *output_data, const int *input_shape, - const PadParameter *pad_param, int begin, int end); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_PAD_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/pooling_fp16.h b/mindspore/lite/nnacl/fp16/pooling_fp16.h deleted file mode 100644 index 9dfd043ecc..0000000000 --- a/mindspore/lite/nnacl/fp16/pooling_fp16.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_POOLING_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_POOLING_FP16_H_ - -#include -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/pooling_parameter.h" -#ifdef __cplusplus -extern "C" { -#endif -int AvgPoolingFp16(const float16_t *input_ptr, float16_t *output_ptr, PoolingParameter *pooling_param, int task_id, - float16_t min, float16_t max); - -void MaxPoolingFp16(const float16_t *input_ptr, float16_t *output_ptr, PoolingParameter *pooling_param, int task_id, - float16_t min, float16_t max); -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP16_POOLING_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/power_fp16.h b/mindspore/lite/nnacl/fp16/power_fp16.h deleted file mode 100644 index 32663f809f..0000000000 --- a/mindspore/lite/nnacl/fp16/power_fp16.h +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_POWER_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_POWER_FP16_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/power_parameter.h" - -#if defined(ENABLE_NEON) -typedef float16x8_t (*PowerSimdFunFp16)(float16x8_t x, const void *exponent); -#endif -typedef float16_t (*PowerScalarFunFp16)(float16_t x, const void *exponent); -typedef void (*PowerFunFp16)(const float16_t *, const float16_t *, float16_t *, int, float, float); - -#ifdef __cplusplus -extern "C" { -#endif -static inline bool CheckInteger(float16_t f) { return floorf(f) == f; } - -static inline float16_t StdPowerScalarFp16(float16_t x, const void *exponent) { - return powf(x, *(float16_t *)exponent); -} - -#if defined(ENABLE_NEON) -static inline float16x8_t StdPowerSimdFp16(float16x8_t x, const void *exponent) { - float16x8_t result; - result[0] = powf(x[0], *(float16_t *)exponent); - result[1] = powf(x[1], *(float16_t *)exponent); - result[2] = powf(x[2], *(float16_t *)exponent); - result[3] = powf(x[3], *(float16_t *)exponent); - result[4] = powf(x[4], *(float16_t *)exponent); - result[5] = powf(x[5], *(float16_t *)exponent); - result[6] = powf(x[6], *(float16_t *)exponent); - result[7] = powf(x[7], *(float16_t *)exponent); - return result; -} -#endif -int PowerFp16(const float16_t *input, const float16_t *exponent, float16_t *output, int len, float scale, float shift, - bool broadcast); -void PowerSingleFp16(const float16_t *input, const float16_t *exponent, float16_t *output, int len, float scale, - float shift); -void PowerBroadCastFp16(const float16_t *input, const float16_t *exponent, float16_t *output, int len, float scale, - float shift); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_POWER_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.h b/mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.h deleted file mode 100644 index 9019bf565f..0000000000 --- a/mindspore/lite/nnacl/fp16/quant_dtype_cast_fp16.h +++ 
/dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_QUANTDTYPECAST_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_QUANTDTYPECAST_FP16_H_ - -#include "nnacl/op_base.h" - -#ifdef ENABLE_NEON -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif -int DoDequantizeInt8ToFp16(int8_t *quant_values, float16_t *real_values, float scale, int32_t zp, int size); -int DoQuantizeFp16ToInt8(float16_t *real_values, int8_t *quant_values, float scale, int32_t zp, int size); - -int DoDequantizeUInt8ToFp16(uint8_t *quant_values, float16_t *real_values, float scale, int32_t zp, int size); -int DoQuantizeFp16ToUInt8(float16_t *real_values, uint8_t *quant_values, float scale, int32_t zp, int size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_QUANTDTYPECAST_H_ diff --git a/mindspore/lite/nnacl/fp16/reduce_fp16.h b/mindspore/lite/nnacl/fp16/reduce_fp16.h deleted file mode 100644 index b6349c877e..0000000000 --- a/mindspore/lite/nnacl/fp16/reduce_fp16.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_REDUCE_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_REDUCE_FP16_H_ -#include "nnacl/op_base.h" -#include "nnacl/reduce_parameter.h" - -#ifdef ENABLE_NEON -#include -#endif -#ifdef __cplusplus -extern "C" { -#endif -int ReduceMeanFp16(const int outer_size, const int inner_size, const int axis_size, const float16_t *src_data, - float16_t *dst_data, const int tid, const int thread_num); -int ReduceMaxFp16(int outer_size, int inner_size, int axis_size, const float16_t *src_data, float16_t *dst_data, - int tid, int thread_num); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_REDUCE_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/scale_fp16.c b/mindspore/lite/nnacl/fp16/scale_fp16.c deleted file mode 100644 index 94d5560c2a..0000000000 --- a/mindspore/lite/nnacl/fp16/scale_fp16.c +++ /dev/null @@ -1,223 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "nnacl/fp16/scale_fp16.h" - -void Fp16ScaleInner(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, - int outer_end, int axis_size, int inner_size) { - for (int out = outer_start; out < outer_end; out++) { - int out_offset = out * axis_size * inner_size; - for (int i = 0; i < axis_size; i++) { - int axis_offset = out_offset + i * inner_size; - int in_index = 0; -#ifdef ENABLE_ARM64 - for (; in_index < inner_size - 8; in_index += 8) { - int in_offset = axis_offset + in_index; - float16x8_t data = vld1q_f16(in_data + in_offset); - float16x8_t scale_8 = vdupq_n_f16(scale[i]); - float16x8_t offset_8 = vdupq_n_f16(offset[i]); - float16x8_t reslut = vfmaq_f16(offset_8, data, scale_8); - - vst1q_f16(out_data + in_offset, reslut); - } -#endif - for (; in_index < inner_size; in_index++) { - int in_offset = axis_offset + in_index; - out_data[in_offset] = in_data[in_offset] * scale[i] + offset[i]; - } - } - } -} - -void Fp16ScaleAxis(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, - int outer_end, int axis_size) { - for (int out = outer_start; out < outer_end; out++) { - int out_offset = out * axis_size; - int index = 0; -#ifdef ENABLE_ARM64 - for (; index < axis_size - 8; index += 8) { - int in_offset = out_offset + index; - float16x8_t data = vld1q_f16(in_data + in_offset); - float16x8_t scale_8 = vld1q_f16(scale + index); - float16x8_t offset_8 = vld1q_f16(offset + index); - float16x8_t reslut = vfmaq_f16(offset_8, data, scale_8); - vst1q_f16(out_data + in_offset, reslut); - } -#endif - for (; index < axis_size; index++) { - int in_offset = out_offset + index; - out_data[in_offset] = in_data[in_offset] * scale[index] + offset[index]; - } - } -} - -void DoScaleFp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, - ScaleParameter *scale_param) { - int outer_step = UP_DIV(scale_param->outer_size_, 
scale_param->op_parameter_.thread_num_); - int outer_start = task_id * outer_step; - int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_); - - if (scale_param->inner_size_ == 1) { - Fp16ScaleAxis(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_); - } else { - Fp16ScaleInner(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_, - scale_param->inner_size_); - } -} - -void Fp16ScaleInnerRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, - int outer_end, int axis_size, int inner_size) { -#ifdef ENABLE_ARM64 - float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0}; -#endif - for (int out = outer_start; out < outer_end; out++) { - int out_offset = out * axis_size * inner_size; - for (int i = 0; i < axis_size; i++) { - int axis_offset = out_offset + i * inner_size; - int in_index = 0; -#ifdef ENABLE_ARM64 - for (; in_index < inner_size - 8; in_index += 8) { - int in_offset = axis_offset + in_index; - float16x8_t data = vld1q_f16(in_data + in_offset); - float16x8_t scale_8 = vdupq_n_f16(scale[i]); - float16x8_t offset_8 = vdupq_n_f16(offset[i]); - float16x8_t tmp = vfmaq_f16(offset_8, data, scale_8); - float16x8_t result = vmaxq_f16(tmp, zeros); - vst1q_f16(out_data + in_offset, result); - } -#endif - for (; in_index < inner_size; in_index++) { - int in_offset = axis_offset + in_index; - float tmp = in_data[in_offset] * scale[i] + offset[i]; - out_data[in_offset] = tmp > 0.0f ? 
tmp : 0.0f; - } - } - } -} - -void Fp16ScaleAxisRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, - int outer_end, int axis_size) { -#ifdef ENABLE_ARM64 - float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0}; -#endif - for (int out = outer_start; out < outer_end; out++) { - int out_offset = out * axis_size; - int index = 0; -#ifdef ENABLE_ARM64 - for (; index < axis_size - 8; index += 8) { - int in_offset = out_offset + index; - float16x8_t data = vld1q_f16(in_data + in_offset); - float16x8_t scale_8 = vld1q_f16(scale + index); - float16x8_t offset_8 = vld1q_f16(offset + index); - float16x8_t tmp = vfmaq_f16(offset_8, data, scale_8); - float16x8_t result = vmaxq_f16(tmp, zeros); - vst1q_f16(out_data + in_offset, result); - } -#endif - for (; index < axis_size; index++) { - int in_offset = out_offset + index; - float tmp = in_data[in_offset] * scale[index] + offset[index]; - out_data[in_offset] = tmp > 0.0f ? tmp : 0.0f; - } - } -} - -void Fp16DoScaleRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, - ScaleParameter *scale_param) { - int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_); - int outer_start = task_id * outer_step; - int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_); - - if (scale_param->inner_size_ == 1) { - Fp16ScaleAxisRelu(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_); - } else { - Fp16ScaleInnerRelu(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_, - scale_param->inner_size_); - } -} - -void Fp16ScaleInnerRelu6(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, - int outer_end, int axis_size, int inner_size) { -#ifdef ENABLE_ARM64 - float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0}; - float16x8_t bounds = {6, 6, 6, 6, 6, 6, 6, 6}; -#endif - for (int out = outer_start; out < outer_end; out++) { - 
int out_offset = out * axis_size * inner_size; - for (int i = 0; i < axis_size; i++) { - int axis_offset = out_offset + i * inner_size; - int in_index = 0; -#ifdef ENABLE_ARM64 - for (; in_index < inner_size - 8; in_index += 8) { - int in_offset = axis_offset + in_index; - float16x8_t data = vld1q_f16(in_data + in_offset); - float16x8_t scale_8 = vdupq_n_f16(scale[i]); - float16x8_t offset_8 = vdupq_n_f16(offset[i]); - float16x8_t tmp = vfmaq_f16(offset_8, data, scale_8); - float16x8_t result = vminq_f16(vmaxq_f16(tmp, zeros), bounds); - vst1q_f16(out_data + in_offset, result); - } -#endif - for (; in_index < inner_size; in_index++) { - int in_offset = axis_offset + in_index; - float tmp = in_data[in_offset] * scale[i] + offset[i]; - out_data[in_offset] = MSMIN(MSMAX(tmp, 0.0f), 6.0f); - } - } - } -} - -void Fp16ScaleAxisRelu6(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int outer_start, - int outer_end, int axis_size) { -#ifdef ENABLE_ARM64 - float16x8_t zeros = {0, 0, 0, 0, 0, 0, 0, 0}; - float16x8_t bounds = {6, 6, 6, 6, 6, 6, 6, 6}; -#endif - for (int out = outer_start; out < outer_end; out++) { - int out_offset = out * axis_size; - int index = 0; -#ifdef ENABLE_ARM64 - for (; index < axis_size - 8; index += 8) { - int in_offset = out_offset + index; - float16x8_t data = vld1q_f16(in_data + in_offset); - float16x8_t scale_8 = vld1q_f16(scale + index); - float16x8_t offset_8 = vld1q_f16(offset + index); - float16x8_t tmp = vfmaq_f16(offset_8, data, scale_8); - float16x8_t result = vminq_f16(vmaxq_f16(tmp, zeros), bounds); - vst1q_f16(out_data + in_offset, result); - } -#endif - for (; index < axis_size; index++) { - int in_offset = out_offset + index; - float tmp = in_data[in_offset] * scale[index] + offset[index]; - out_data[in_offset] = MSMIN(MSMAX(tmp, 0.0f), 6.0f); - } - } -} - -void DoScaleRelu6Fp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, - ScaleParameter *scale_param) { - 
int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_); - int outer_start = task_id * outer_step; - int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_); - - if (scale_param->inner_size_ == 1) { - Fp16ScaleAxisRelu6(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_); - } else { - Fp16ScaleInnerRelu6(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_, - scale_param->inner_size_); - } -} diff --git a/mindspore/lite/nnacl/fp16/scale_fp16.h b/mindspore/lite/nnacl/fp16/scale_fp16.h deleted file mode 100644 index 6a391c495b..0000000000 --- a/mindspore/lite/nnacl/fp16/scale_fp16.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SCALE_FP16_H_ -#define MINDSPORE_LITE_NNACL_SCALE_FP16_H_ - -#include "nnacl/op_base.h" -#include "nnacl/scale.h" -#ifdef ENABLE_NEON -#include -#endif -#ifdef __cplusplus -extern "C" { -#endif -void DoScaleFp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, - ScaleParameter *scale_param); -void Fp16DoScaleRelu(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, - ScaleParameter *scale_param); -void DoScaleRelu6Fp16(float16_t *in_data, float16_t *out_data, float16_t *scale, float16_t *offset, int task_id, - ScaleParameter *scale_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_SCALE_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/softmax_fp16.h b/mindspore/lite/nnacl/fp16/softmax_fp16.h deleted file mode 100644 index 2c2b575d92..0000000000 --- a/mindspore/lite/nnacl/fp16/softmax_fp16.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_SOFTMAX_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_SOFTMAX_FP16_H_ - -#include "nnacl/op_base.h" -#include "nnacl/softmax_parameter.h" -#ifdef ENABLE_NEON -#include -#endif -#ifdef __cplusplus -extern "C" { -#endif -void SoftmaxNormFp16(const float16_t *src, float16_t *dst, int batch, int channel); -void SoftmaxFp16(const float16_t *input_ptr, float16_t *output_ptr, float16_t *sum_data, SoftmaxParameter *parameter); -void SoftmaxLastAxisFp16(const float16_t *src, float16_t *dst, int batch, int channel); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_SOFTMAX_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/transpose_fp16.h b/mindspore/lite/nnacl/fp16/transpose_fp16.h deleted file mode 100644 index aa7b21b63f..0000000000 --- a/mindspore/lite/nnacl/fp16/transpose_fp16.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_TRANSPOSE_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_TRANSPOSE_FP16_H_ - -#include "nnacl/op_base.h" -#include "nnacl/transpose.h" -#ifdef ENABLE_NEON -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif -int Fp16DoTranspose(const float16_t *in_data, float16_t *out_data, const int *output_shape, - TransposeParameter *transpose_param, int *size, int *position); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_TRANSPOSE_FP16_H_ diff --git a/mindspore/lite/nnacl/fp16/winograd_transform_fp16.h b/mindspore/lite/nnacl/fp16/winograd_transform_fp16.h deleted file mode 100644 index 42d997a3cc..0000000000 --- a/mindspore/lite/nnacl/fp16/winograd_transform_fp16.h +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_WINOGRAD_TRANSFORM_FP16_H_ -#define MINDSPORE_LITE_NNACL_FP16_WINOGRAD_TRANSFORM_FP16_H_ - -#include -#include -#include "nnacl/errorcode.h" -#include "nnacl/fp16/cast_fp16.h" -#include "nnacl/fp16/conv_fp16.h" -#include "nnacl/fp16/matrix_fp16.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// for fp16 convolution 3x3 filter/input/output transform -void Conv3x3Fp16InputUnit(float16_t *tmp_data, float16_t *trans_input_data, size_t step); - -void Conv3x3Fp16InputTransform(const float16_t *input_data, float16_t *trans_input, float16_t *tmp_data, - int start_index, int real_cal_num, int out_w_block, ConvParameter *conv_param); - -void Conv3x3Fp16FilterTransform(const float16_t *weight_data, float16_t *trans_weight, int iC8, int output_channel, - int kernel_plane); - -void Conv3x3Fp16OutputUnit(const float16_t *gemm_out, const float16_t *bias_data, float16_t *output_data, int output_w); - -void Conv3x3Fp16OutputTransform(const float16_t *gemm_out, float16_t *out_data, const float16_t *bias_data, - int start_index, int real_cal_num, int out_w_block, ConvParameter *conv_param); - -// fp16 common winograd -void WinogradInputTransformFp16(const float16_t *input_data, float16_t *trans_input, float16_t *tmp_data, int cal_num, - int out_tile_index, int out_w_block_num, ConvParameter *conv_param, - InputTransFp16Func func); - -void WinogradOutputTransformFp16(const float16_t *gemm_out, float16_t *tmp_out_data, const float16_t *bias_data, - int cal_num, int out_tile_index, int output_unit_num, ConvParameter *conv_param, - OutputTransFp16Func func); - -// fp16 winograd weight trans -int WinogradWeightTransformFp16(const float16_t *weight_data, float16_t *winograd_data, float *matrix_g, - float *matrix_gt, int oc_block, int input_unit, int kernel_unit, int filter_channel, - int filter_batch, bool pack); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_WINOGRAD_TRANSFORM_FP16_H_ diff --git 
a/mindspore/lite/nnacl/fp16/winograd_utils_fp16.h b/mindspore/lite/nnacl/fp16/winograd_utils_fp16.h deleted file mode 100644 index 623e1c9120..0000000000 --- a/mindspore/lite/nnacl/fp16/winograd_utils_fp16.h +++ /dev/null @@ -1,502 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP16_WINOGRAD_UTILS_H_ -#define MINDSPORE_LITE_NNACL_FP16_WINOGRAD_UTILS_H_ - -#include -#include "nnacl/conv_parameter.h" -#include "nnacl/op_base.h" - -#define MAX_LEN 256 - -#ifdef __cplusplus -extern "C" { -#endif -typedef void (*InputTransFp16Func)(const float16_t *src_data, float16_t *dst_data, int src_step, int dst_step, - int real_c); - -typedef void (*OutputTransFp16Func)(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); - -void GeneralInputTransformUnitFp16(const float16_t *src_data, float16_t *dst_data, float16_t *matrix_b, - float16_t *matrix_bt, int src_step, int dst_step, int in_unit); - -void GeneralOutputTransformUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - float16_t *matrix_a, float16_t *matrix_at, int src_step, int dst_step, int in_unit, - int out_unit); - -#define Load16DataFp16 \ - src[0] = vld1q_f16(src_data + 0 * src_step); \ - src[1] = vld1q_f16(src_data + 1 * src_step); \ - src[2] = vld1q_f16(src_data + 2 * src_step); \ - src[3] 
= vld1q_f16(src_data + 3 * src_step); \ - src[4] = vld1q_f16(src_data + 4 * src_step); \ - src[5] = vld1q_f16(src_data + 5 * src_step); \ - src[6] = vld1q_f16(src_data + 6 * src_step); \ - src[7] = vld1q_f16(src_data + 7 * src_step); \ - src[8] = vld1q_f16(src_data + 8 * src_step); \ - src[9] = vld1q_f16(src_data + 9 * src_step); \ - src[10] = vld1q_f16(src_data + 10 * src_step); \ - src[11] = vld1q_f16(src_data + 11 * src_step); \ - src[12] = vld1q_f16(src_data + 12 * src_step); \ - src[13] = vld1q_f16(src_data + 13 * src_step); \ - src[14] = vld1q_f16(src_data + 14 * src_step); \ - src[15] = vld1q_f16(src_data + 15 * src_step); - -#define Load16DataC4Fp16 \ - src[0] = vld1_f16(src_data + 0 * src_step); \ - src[1] = vld1_f16(src_data + 1 * src_step); \ - src[2] = vld1_f16(src_data + 2 * src_step); \ - src[3] = vld1_f16(src_data + 3 * src_step); \ - src[4] = vld1_f16(src_data + 4 * src_step); \ - src[5] = vld1_f16(src_data + 5 * src_step); \ - src[6] = vld1_f16(src_data + 6 * src_step); \ - src[7] = vld1_f16(src_data + 7 * src_step); \ - src[8] = vld1_f16(src_data + 8 * src_step); \ - src[9] = vld1_f16(src_data + 9 * src_step); \ - src[10] = vld1_f16(src_data + 10 * src_step); \ - src[11] = vld1_f16(src_data + 11 * src_step); \ - src[12] = vld1_f16(src_data + 12 * src_step); \ - src[13] = vld1_f16(src_data + 13 * src_step); \ - src[14] = vld1_f16(src_data + 14 * src_step); \ - src[15] = vld1_f16(src_data + 15 * src_step); - -#define Load36DataFp16 \ - src[0] = vld1q_f16(src_data + 0 * src_step); \ - src[1] = vld1q_f16(src_data + 1 * src_step); \ - src[2] = vld1q_f16(src_data + 2 * src_step); \ - src[3] = vld1q_f16(src_data + 3 * src_step); \ - src[4] = vld1q_f16(src_data + 4 * src_step); \ - src[5] = vld1q_f16(src_data + 5 * src_step); \ - src[6] = vld1q_f16(src_data + 6 * src_step); \ - src[7] = vld1q_f16(src_data + 7 * src_step); \ - src[8] = vld1q_f16(src_data + 8 * src_step); \ - src[9] = vld1q_f16(src_data + 9 * src_step); \ - src[10] = vld1q_f16(src_data + 10 
* src_step); \ - src[11] = vld1q_f16(src_data + 11 * src_step); \ - src[12] = vld1q_f16(src_data + 12 * src_step); \ - src[13] = vld1q_f16(src_data + 13 * src_step); \ - src[14] = vld1q_f16(src_data + 14 * src_step); \ - src[15] = vld1q_f16(src_data + 15 * src_step); \ - src[16] = vld1q_f16(src_data + 16 * src_step); \ - src[17] = vld1q_f16(src_data + 17 * src_step); \ - src[18] = vld1q_f16(src_data + 18 * src_step); \ - src[19] = vld1q_f16(src_data + 19 * src_step); \ - src[20] = vld1q_f16(src_data + 20 * src_step); \ - src[21] = vld1q_f16(src_data + 21 * src_step); \ - src[22] = vld1q_f16(src_data + 22 * src_step); \ - src[23] = vld1q_f16(src_data + 23 * src_step); \ - src[24] = vld1q_f16(src_data + 24 * src_step); \ - src[25] = vld1q_f16(src_data + 25 * src_step); \ - src[26] = vld1q_f16(src_data + 26 * src_step); \ - src[27] = vld1q_f16(src_data + 27 * src_step); \ - src[28] = vld1q_f16(src_data + 28 * src_step); \ - src[29] = vld1q_f16(src_data + 29 * src_step); \ - src[30] = vld1q_f16(src_data + 30 * src_step); \ - src[31] = vld1q_f16(src_data + 31 * src_step); \ - src[32] = vld1q_f16(src_data + 32 * src_step); \ - src[33] = vld1q_f16(src_data + 33 * src_step); \ - src[34] = vld1q_f16(src_data + 34 * src_step); \ - src[35] = vld1q_f16(src_data + 35 * src_step); - -#define Load36DataC4Fp16 \ - src[0] = vld1_f16(src_data + 0 * src_step); \ - src[1] = vld1_f16(src_data + 1 * src_step); \ - src[2] = vld1_f16(src_data + 2 * src_step); \ - src[3] = vld1_f16(src_data + 3 * src_step); \ - src[4] = vld1_f16(src_data + 4 * src_step); \ - src[5] = vld1_f16(src_data + 5 * src_step); \ - src[6] = vld1_f16(src_data + 6 * src_step); \ - src[7] = vld1_f16(src_data + 7 * src_step); \ - src[8] = vld1_f16(src_data + 8 * src_step); \ - src[9] = vld1_f16(src_data + 9 * src_step); \ - src[10] = vld1_f16(src_data + 10 * src_step); \ - src[11] = vld1_f16(src_data + 11 * src_step); \ - src[12] = vld1_f16(src_data + 12 * src_step); \ - src[13] = vld1_f16(src_data + 13 * src_step); \ - 
src[14] = vld1_f16(src_data + 14 * src_step); \ - src[15] = vld1_f16(src_data + 15 * src_step); \ - src[16] = vld1_f16(src_data + 16 * src_step); \ - src[17] = vld1_f16(src_data + 17 * src_step); \ - src[18] = vld1_f16(src_data + 18 * src_step); \ - src[19] = vld1_f16(src_data + 19 * src_step); \ - src[20] = vld1_f16(src_data + 20 * src_step); \ - src[21] = vld1_f16(src_data + 21 * src_step); \ - src[22] = vld1_f16(src_data + 22 * src_step); \ - src[23] = vld1_f16(src_data + 23 * src_step); \ - src[24] = vld1_f16(src_data + 24 * src_step); \ - src[25] = vld1_f16(src_data + 25 * src_step); \ - src[26] = vld1_f16(src_data + 26 * src_step); \ - src[27] = vld1_f16(src_data + 27 * src_step); \ - src[28] = vld1_f16(src_data + 28 * src_step); \ - src[29] = vld1_f16(src_data + 29 * src_step); \ - src[30] = vld1_f16(src_data + 30 * src_step); \ - src[31] = vld1_f16(src_data + 31 * src_step); \ - src[32] = vld1_f16(src_data + 32 * src_step); \ - src[33] = vld1_f16(src_data + 33 * src_step); \ - src[34] = vld1_f16(src_data + 34 * src_step); \ - src[35] = vld1_f16(src_data + 35 * src_step); - -#define Load64DataFp16 \ - src[0] = vld1q_f16(src_data + 0 * src_step); \ - src[1] = vld1q_f16(src_data + 1 * src_step); \ - src[2] = vld1q_f16(src_data + 2 * src_step); \ - src[3] = vld1q_f16(src_data + 3 * src_step); \ - src[4] = vld1q_f16(src_data + 4 * src_step); \ - src[5] = vld1q_f16(src_data + 5 * src_step); \ - src[6] = vld1q_f16(src_data + 6 * src_step); \ - src[7] = vld1q_f16(src_data + 7 * src_step); \ - src[8] = vld1q_f16(src_data + 8 * src_step); \ - src[9] = vld1q_f16(src_data + 9 * src_step); \ - src[10] = vld1q_f16(src_data + 10 * src_step); \ - src[11] = vld1q_f16(src_data + 11 * src_step); \ - src[12] = vld1q_f16(src_data + 12 * src_step); \ - src[13] = vld1q_f16(src_data + 13 * src_step); \ - src[14] = vld1q_f16(src_data + 14 * src_step); \ - src[15] = vld1q_f16(src_data + 15 * src_step); \ - src[16] = vld1q_f16(src_data + 16 * src_step); \ - src[17] = 
vld1q_f16(src_data + 17 * src_step); \ - src[18] = vld1q_f16(src_data + 18 * src_step); \ - src[19] = vld1q_f16(src_data + 19 * src_step); \ - src[20] = vld1q_f16(src_data + 20 * src_step); \ - src[21] = vld1q_f16(src_data + 21 * src_step); \ - src[22] = vld1q_f16(src_data + 22 * src_step); \ - src[23] = vld1q_f16(src_data + 23 * src_step); \ - src[24] = vld1q_f16(src_data + 24 * src_step); \ - src[25] = vld1q_f16(src_data + 25 * src_step); \ - src[26] = vld1q_f16(src_data + 26 * src_step); \ - src[27] = vld1q_f16(src_data + 27 * src_step); \ - src[28] = vld1q_f16(src_data + 28 * src_step); \ - src[29] = vld1q_f16(src_data + 29 * src_step); \ - src[30] = vld1q_f16(src_data + 30 * src_step); \ - src[31] = vld1q_f16(src_data + 31 * src_step); \ - src[32] = vld1q_f16(src_data + 32 * src_step); \ - src[33] = vld1q_f16(src_data + 33 * src_step); \ - src[34] = vld1q_f16(src_data + 34 * src_step); \ - src[35] = vld1q_f16(src_data + 35 * src_step); \ - src[36] = vld1q_f16(src_data + 36 * src_step); \ - src[37] = vld1q_f16(src_data + 37 * src_step); \ - src[38] = vld1q_f16(src_data + 38 * src_step); \ - src[39] = vld1q_f16(src_data + 39 * src_step); \ - src[40] = vld1q_f16(src_data + 40 * src_step); \ - src[41] = vld1q_f16(src_data + 41 * src_step); \ - src[42] = vld1q_f16(src_data + 42 * src_step); \ - src[43] = vld1q_f16(src_data + 43 * src_step); \ - src[44] = vld1q_f16(src_data + 44 * src_step); \ - src[45] = vld1q_f16(src_data + 45 * src_step); \ - src[46] = vld1q_f16(src_data + 46 * src_step); \ - src[47] = vld1q_f16(src_data + 47 * src_step); \ - src[48] = vld1q_f16(src_data + 48 * src_step); \ - src[49] = vld1q_f16(src_data + 49 * src_step); \ - src[50] = vld1q_f16(src_data + 50 * src_step); \ - src[51] = vld1q_f16(src_data + 51 * src_step); \ - src[52] = vld1q_f16(src_data + 52 * src_step); \ - src[53] = vld1q_f16(src_data + 53 * src_step); \ - src[54] = vld1q_f16(src_data + 54 * src_step); \ - src[55] = vld1q_f16(src_data + 55 * src_step); \ - src[56] = 
vld1q_f16(src_data + 56 * src_step); \ - src[57] = vld1q_f16(src_data + 57 * src_step); \ - src[58] = vld1q_f16(src_data + 58 * src_step); \ - src[59] = vld1q_f16(src_data + 59 * src_step); \ - src[60] = vld1q_f16(src_data + 60 * src_step); \ - src[61] = vld1q_f16(src_data + 61 * src_step); \ - src[62] = vld1q_f16(src_data + 62 * src_step); \ - src[63] = vld1q_f16(src_data + 63 * src_step); - -#define Load64DataC4Fp16 \ - src[0] = vld1_f16(src_data + 0 * src_step); \ - src[1] = vld1_f16(src_data + 1 * src_step); \ - src[2] = vld1_f16(src_data + 2 * src_step); \ - src[3] = vld1_f16(src_data + 3 * src_step); \ - src[4] = vld1_f16(src_data + 4 * src_step); \ - src[5] = vld1_f16(src_data + 5 * src_step); \ - src[6] = vld1_f16(src_data + 6 * src_step); \ - src[7] = vld1_f16(src_data + 7 * src_step); \ - src[8] = vld1_f16(src_data + 8 * src_step); \ - src[9] = vld1_f16(src_data + 9 * src_step); \ - src[10] = vld1_f16(src_data + 10 * src_step); \ - src[11] = vld1_f16(src_data + 11 * src_step); \ - src[12] = vld1_f16(src_data + 12 * src_step); \ - src[13] = vld1_f16(src_data + 13 * src_step); \ - src[14] = vld1_f16(src_data + 14 * src_step); \ - src[15] = vld1_f16(src_data + 15 * src_step); \ - src[16] = vld1_f16(src_data + 16 * src_step); \ - src[17] = vld1_f16(src_data + 17 * src_step); \ - src[18] = vld1_f16(src_data + 18 * src_step); \ - src[19] = vld1_f16(src_data + 19 * src_step); \ - src[20] = vld1_f16(src_data + 20 * src_step); \ - src[21] = vld1_f16(src_data + 21 * src_step); \ - src[22] = vld1_f16(src_data + 22 * src_step); \ - src[23] = vld1_f16(src_data + 23 * src_step); \ - src[24] = vld1_f16(src_data + 24 * src_step); \ - src[25] = vld1_f16(src_data + 25 * src_step); \ - src[26] = vld1_f16(src_data + 26 * src_step); \ - src[27] = vld1_f16(src_data + 27 * src_step); \ - src[28] = vld1_f16(src_data + 28 * src_step); \ - src[29] = vld1_f16(src_data + 29 * src_step); \ - src[30] = vld1_f16(src_data + 30 * src_step); \ - src[31] = vld1_f16(src_data + 31 * 
src_step); \ - src[32] = vld1_f16(src_data + 32 * src_step); \ - src[33] = vld1_f16(src_data + 33 * src_step); \ - src[34] = vld1_f16(src_data + 34 * src_step); \ - src[35] = vld1_f16(src_data + 35 * src_step); \ - src[36] = vld1_f16(src_data + 36 * src_step); \ - src[37] = vld1_f16(src_data + 37 * src_step); \ - src[38] = vld1_f16(src_data + 38 * src_step); \ - src[39] = vld1_f16(src_data + 39 * src_step); \ - src[40] = vld1_f16(src_data + 40 * src_step); \ - src[41] = vld1_f16(src_data + 41 * src_step); \ - src[42] = vld1_f16(src_data + 42 * src_step); \ - src[43] = vld1_f16(src_data + 43 * src_step); \ - src[44] = vld1_f16(src_data + 44 * src_step); \ - src[45] = vld1_f16(src_data + 45 * src_step); \ - src[46] = vld1_f16(src_data + 46 * src_step); \ - src[47] = vld1_f16(src_data + 47 * src_step); \ - src[48] = vld1_f16(src_data + 48 * src_step); \ - src[49] = vld1_f16(src_data + 49 * src_step); \ - src[50] = vld1_f16(src_data + 50 * src_step); \ - src[51] = vld1_f16(src_data + 51 * src_step); \ - src[52] = vld1_f16(src_data + 52 * src_step); \ - src[53] = vld1_f16(src_data + 53 * src_step); \ - src[54] = vld1_f16(src_data + 54 * src_step); \ - src[55] = vld1_f16(src_data + 55 * src_step); \ - src[56] = vld1_f16(src_data + 56 * src_step); \ - src[57] = vld1_f16(src_data + 57 * src_step); \ - src[58] = vld1_f16(src_data + 58 * src_step); \ - src[59] = vld1_f16(src_data + 59 * src_step); \ - src[60] = vld1_f16(src_data + 60 * src_step); \ - src[61] = vld1_f16(src_data + 61 * src_step); \ - src[62] = vld1_f16(src_data + 62 * src_step); \ - src[63] = vld1_f16(src_data + 63 * src_step); - -InputTransFp16Func GetInputTransFp16Func(int input_unit); - -void InputTransform4x4UnitFp16(const float16_t *src_data, float16_t *dst_data, int src_step, int dst_step, int real_c); - -void InputTransform6x6UnitFp16(const float16_t *src_data, float16_t *dst_data, int src_step, int dst_step, int real_c); - -void InputTransform8x8UnitFp16(const float16_t *src_data, float16_t *dst_data, 
int src_step, int dst_step, int real_c); - -OutputTransFp16Func GetOutputTransFp16Func(int input_unit, int output_unit, ActType act_type); - -#define Store4DataFp16 \ - vst1q_f16(dst_data, m[0]); \ - vst1q_f16(dst_data + out_c, m[1]); \ - vst1q_f16(dst_data + dst_step * out_c, m[2]); \ - vst1q_f16(dst_data + dst_step * out_c + out_c, m[3]); - -#define Store4DataC4Fp16 \ - vst1_f16(dst_data, m[0]); \ - vst1_f16(dst_data + out_c, m[1]); \ - vst1_f16(dst_data + dst_step * out_c, m[2]); \ - vst1_f16(dst_data + dst_step * out_c + out_c, m[3]); - -#define Store9DataFp16 \ - vst1q_f16(dst_data, m[0]); \ - vst1q_f16(dst_data + out_c, m[1]); \ - vst1q_f16(dst_data + 2 * out_c, m[2]); \ - vst1q_f16(dst_data + dst_step * out_c, m[3]); \ - vst1q_f16(dst_data + dst_step * out_c + out_c, m[4]); \ - vst1q_f16(dst_data + dst_step * out_c + 2 * out_c, m[5]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c, m[6]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c + out_c, m[7]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[8]); - -#define Store9DataC4Fp16 \ - vst1_f16(dst_data, m[0]); \ - vst1_f16(dst_data + out_c, m[1]); \ - vst1_f16(dst_data + 2 * out_c, m[2]); \ - vst1_f16(dst_data + dst_step * out_c, m[3]); \ - vst1_f16(dst_data + dst_step * out_c + out_c, m[4]); \ - vst1_f16(dst_data + dst_step * out_c + 2 * out_c, m[5]); \ - vst1_f16(dst_data + 2 * dst_step * out_c, m[6]); \ - vst1_f16(dst_data + 2 * dst_step * out_c + out_c, m[7]); \ - vst1_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[8]); - -#define Store16DataFp16 \ - vst1q_f16(dst_data, m[0]); \ - vst1q_f16(dst_data + out_c, m[1]); \ - vst1q_f16(dst_data + 2 * out_c, m[2]); \ - vst1q_f16(dst_data + 3 * out_c, m[3]); \ - vst1q_f16(dst_data + dst_step * out_c, m[4]); \ - vst1q_f16(dst_data + dst_step * out_c + out_c, m[5]); \ - vst1q_f16(dst_data + dst_step * out_c + 2 * out_c, m[6]); \ - vst1q_f16(dst_data + dst_step * out_c + 3 * out_c, m[7]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c, m[8]); \ - 
vst1q_f16(dst_data + 2 * dst_step * out_c + out_c, m[9]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[10]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c + 3 * out_c, m[11]); \ - vst1q_f16(dst_data + 3 * dst_step * out_c, m[12]); \ - vst1q_f16(dst_data + 3 * dst_step * out_c + out_c, m[13]); \ - vst1q_f16(dst_data + 3 * dst_step * out_c + 2 * out_c, m[14]); \ - vst1q_f16(dst_data + 3 * dst_step * out_c + 3 * out_c, m[15]); - -#define Store16DataC4Fp16 \ - vst1_f16(dst_data, m[0]); \ - vst1_f16(dst_data + out_c, m[1]); \ - vst1_f16(dst_data + 2 * out_c, m[2]); \ - vst1_f16(dst_data + 3 * out_c, m[3]); \ - vst1_f16(dst_data + dst_step * out_c, m[4]); \ - vst1_f16(dst_data + dst_step * out_c + out_c, m[5]); \ - vst1_f16(dst_data + dst_step * out_c + 2 * out_c, m[6]); \ - vst1_f16(dst_data + dst_step * out_c + 3 * out_c, m[7]); \ - vst1_f16(dst_data + 2 * dst_step * out_c, m[8]); \ - vst1_f16(dst_data + 2 * dst_step * out_c + out_c, m[9]); \ - vst1_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[10]); \ - vst1_f16(dst_data + 2 * dst_step * out_c + 3 * out_c, m[11]); \ - vst1_f16(dst_data + 3 * dst_step * out_c, m[12]); \ - vst1_f16(dst_data + 3 * dst_step * out_c + out_c, m[13]); \ - vst1_f16(dst_data + 3 * dst_step * out_c + 2 * out_c, m[14]); \ - vst1_f16(dst_data + 3 * dst_step * out_c + 3 * out_c, m[15]); - -#define Store25DataFp16 \ - vst1q_f16(dst_data, m[0]); \ - vst1q_f16(dst_data + out_c, m[1]); \ - vst1q_f16(dst_data + 2 * out_c, m[2]); \ - vst1q_f16(dst_data + 3 * out_c, m[3]); \ - vst1q_f16(dst_data + 4 * out_c, m[4]); \ - vst1q_f16(dst_data + dst_step * out_c, m[5]); \ - vst1q_f16(dst_data + dst_step * out_c + out_c, m[6]); \ - vst1q_f16(dst_data + dst_step * out_c + 2 * out_c, m[7]); \ - vst1q_f16(dst_data + dst_step * out_c + 3 * out_c, m[8]); \ - vst1q_f16(dst_data + dst_step * out_c + 4 * out_c, m[9]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c, m[10]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c + out_c, m[11]); \ - 
vst1q_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[12]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c + 3 * out_c, m[13]); \ - vst1q_f16(dst_data + 2 * dst_step * out_c + 4 * out_c, m[14]); \ - vst1q_f16(dst_data + 3 * dst_step * out_c, m[15]); \ - vst1q_f16(dst_data + 3 * dst_step * out_c + out_c, m[16]); \ - vst1q_f16(dst_data + 3 * dst_step * out_c + 2 * out_c, m[17]); \ - vst1q_f16(dst_data + 3 * dst_step * out_c + 3 * out_c, m[18]); \ - vst1q_f16(dst_data + 3 * dst_step * out_c + 4 * out_c, m[19]); \ - vst1q_f16(dst_data + 4 * dst_step * out_c, m[20]); \ - vst1q_f16(dst_data + 4 * dst_step * out_c + out_c, m[21]); \ - vst1q_f16(dst_data + 4 * dst_step * out_c + 2 * out_c, m[22]); \ - vst1q_f16(dst_data + 4 * dst_step * out_c + 3 * out_c, m[23]); \ - vst1q_f16(dst_data + 4 * dst_step * out_c + 4 * out_c, m[24]); - -#define Store25DataC4Fp16 \ - vst1_f16(dst_data, m[0]); \ - vst1_f16(dst_data + out_c, m[1]); \ - vst1_f16(dst_data + 2 * out_c, m[2]); \ - vst1_f16(dst_data + 3 * out_c, m[3]); \ - vst1_f16(dst_data + 4 * out_c, m[4]); \ - vst1_f16(dst_data + dst_step * out_c, m[5]); \ - vst1_f16(dst_data + dst_step * out_c + out_c, m[6]); \ - vst1_f16(dst_data + dst_step * out_c + 2 * out_c, m[7]); \ - vst1_f16(dst_data + dst_step * out_c + 3 * out_c, m[8]); \ - vst1_f16(dst_data + dst_step * out_c + 4 * out_c, m[9]); \ - vst1_f16(dst_data + 2 * dst_step * out_c, m[10]); \ - vst1_f16(dst_data + 2 * dst_step * out_c + out_c, m[11]); \ - vst1_f16(dst_data + 2 * dst_step * out_c + 2 * out_c, m[12]); \ - vst1_f16(dst_data + 2 * dst_step * out_c + 3 * out_c, m[13]); \ - vst1_f16(dst_data + 2 * dst_step * out_c + 4 * out_c, m[14]); \ - vst1_f16(dst_data + 3 * dst_step * out_c, m[15]); \ - vst1_f16(dst_data + 3 * dst_step * out_c + out_c, m[16]); \ - vst1_f16(dst_data + 3 * dst_step * out_c + 2 * out_c, m[17]); \ - vst1_f16(dst_data + 3 * dst_step * out_c + 3 * out_c, m[18]); \ - vst1_f16(dst_data + 3 * dst_step * out_c + 4 * out_c, m[19]); \ - vst1_f16(dst_data + 
4 * dst_step * out_c, m[20]); \ - vst1_f16(dst_data + 4 * dst_step * out_c + out_c, m[21]); \ - vst1_f16(dst_data + 4 * dst_step * out_c + 2 * out_c, m[22]); \ - vst1_f16(dst_data + 4 * dst_step * out_c + 3 * out_c, m[23]); \ - vst1_f16(dst_data + 4 * dst_step * out_c + 4 * out_c, m[24]); - -void OutputTransform4x2UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x2ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x2Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x3UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x3ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x3Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); - -void OutputTransform6x2UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x2ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x2Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x3UnitFp16(const float16_t *src_data, float16_t 
*dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x3ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x3Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x4UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x4ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x4Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x5UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x5ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x5Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); - -void OutputTransform8x2UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x2ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x2Relu6UnitFp16(const float16_t *src_data, float16_t 
*dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x3UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x3ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x3Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x4UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x4ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x4Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x5UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x5ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x5Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x6ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, 
const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x6Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x7UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x7ReluUnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x7Relu6UnitFp16(const float16_t *src_data, float16_t *dst_data, const float16_t *bias_data, - int src_step, int dst_step, int out_c, int r_w, int r_h, int r_c); - -int SelectOutputUnitFp16(ConvParameter *conv_param); - -void CheckIfUseWinogradFp16(bool *use_winograd, int *output_unit, ConvParameter *conv_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_WINOGRAD_UTILS_H_ diff --git a/mindspore/lite/nnacl/fp16_grad/activation_grad.h b/mindspore/lite/nnacl/fp16_grad/activation_grad.h deleted file mode 100644 index 79c53584b7..0000000000 --- a/mindspore/lite/nnacl/fp16_grad/activation_grad.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP16_GRAD_ACTIVATION_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP16_GRAD_ACTIVATION_GRAD_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include -#include "nnacl/op_base.h" -#include "nnacl/int8/fixed_point.h" - -typedef struct ActivationGradParameterFp16 { - OpParameter op_parameter; - int type_; - float alpha_; -} ActivationGradParameterFp16; -#ifdef __cplusplus -extern "C" { -#endif - -int Fp16ReluGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); -int Fp16SigmoidGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_GRAD_ACTIVATION_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.h b/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.h deleted file mode 100644 index 3d894581ac..0000000000 --- a/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP16_GRAD_ARITHMETHIC_SELF_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP16_GRAD_ARITHMETHIC_SELF_GRAD_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include -#include "nnacl/op_base.h" - -typedef struct ArithmeticSelfGradParameterFp16 { - OpParameter op_parameter; - int type_; -} ArithmeticSelfGradParameterFp16; -#ifdef __cplusplus -extern "C" { -#endif - -int Fp16LogGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP16_GRAD_ARITHMETHIC_SELF_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32/activation_fp32.h b/mindspore/lite/nnacl/fp32/activation_fp32.h deleted file mode 100644 index 1f741c50b5..0000000000 --- a/mindspore/lite/nnacl/fp32/activation_fp32.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_ACTIVATION_H_ -#define MINDSPORE_LITE_NNACL_FP32_ACTIVATION_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/int8/fixed_point.h" - -typedef struct ActivationParameter { - OpParameter op_parameter_; - int type_; - float alpha_; - float min_val_; - float max_val_; -} ActivationParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int Fp32Relu(const float *src, int length, float *dst); -int Fp32Relu6(const float *src, int length, float *dst); -int LRelu(const float *src, int length, float *dst, float alpha); -int Sigmoid(const float *src, int length, float *dst); -int Tanh(const float *src, int length, float *dst); -int HSigmoid(const float *src, int length, float *dst); -int Swish(const float *src, int length, float *dst); -int HSwish(const float *src, int length, float *dst); -int HardTanh(const float *src, int length, float *dst, float min_val, float max_val); -int Gelu(const float *src, int length, float *dst, bool approximate); - -float TanhOpt(float src); -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP32_ACTIVATION_H_ diff --git a/mindspore/lite/nnacl/fp32/add_fp32.h b/mindspore/lite/nnacl/fp32/add_fp32.h deleted file mode 100644 index 4344f33175..0000000000 --- a/mindspore/lite/nnacl/fp32/add_fp32.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_ADD_H_ -#define MINDSPORE_LITE_NNACL_FP32_ADD_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/base/arithmetic_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ElementAdd(const float *in0, const float *in1, float *out, int size); -int ElementAddRelu(const float *in0, const float *in1, float *out, int size); -int ElementAddRelu6(const float *in0, const float *in1, float *out, int size); -int ElementAddInt(const int *in0, const int *in1, int *out, int size); -int ElementOptAdd(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptAddInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); -int ElementOptAddRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptAddRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int BroadcastAdd(const float *in0, const float *in1, float *tile_in0, float *tile_in1, float *out, int size, - ArithmeticParameter *param); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_ADD_H_ diff --git a/mindspore/lite/nnacl/fp32/adder_fp32.h b/mindspore/lite/nnacl/fp32/adder_fp32.h deleted file mode 100644 index fd3956ac08..0000000000 --- a/mindspore/lite/nnacl/fp32/adder_fp32.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_ADDER_H_ -#define MINDSPORE_LITE_NNACL_FP32_ADDER_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/pack.h" -#include "nnacl/op_base.h" -#include "nnacl/common_func.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef ENABLE_ARM64 -void AdderFloatNeon64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, - int col, size_t stride); -#endif - -void AdderOpt(const float *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row, int col, - size_t stride); - -void AdderFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, - float *col_major_input, float *output_data, int task_id, ConvParameter *conv_param); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_ADDER_H_ diff --git a/mindspore/lite/nnacl/fp32/arg_min_max_fp32.h b/mindspore/lite/nnacl/fp32/arg_min_max_fp32.h deleted file mode 100644 index 509e85203f..0000000000 --- a/mindspore/lite/nnacl/fp32/arg_min_max_fp32.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_ARG_MIN_MAX_H_ -#define MINDSPORE_LITE_NNACL_FP32_ARG_MIN_MAX_H_ - -#include "nnacl/nnacl_common.h" -#include "nnacl/arg_min_max_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void ArgMinMaxFp32(const float *input, void *output, float *output_value, const int *in_shape, - const ArgMinMaxParameter *param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_ARG_MIN_MAX_H_ diff --git a/mindspore/lite/nnacl/fp32/arithmetic_compare_fp32.h b/mindspore/lite/nnacl/fp32/arithmetic_compare_fp32.h deleted file mode 100644 index 5f0ed2d58b..0000000000 --- a/mindspore/lite/nnacl/fp32/arithmetic_compare_fp32.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_H_ -#define MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif -int ElementEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); -int ElementEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); - -int ElementNotEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); -int ElementNotEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); - -int ElementLessFp32(const float *input0, const float *input1, uint8_t *output, int element_size); -int ElementLessInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); - -int ElementLessEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); -int ElementLessEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); - -int ElementGreaterFp32(const float *input0, const float *input1, uint8_t *output, int element_size); -int ElementGreaterInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); - -int ElementGreaterEqualFp32(const float *input0, const float *input1, uint8_t *output, int element_size); -int ElementGreaterEqualInt32(const int32_t *input0, const int32_t *input1, uint8_t *output, int element_size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_H_ diff --git a/mindspore/lite/nnacl/fp32/arithmetic_fp32.h b/mindspore/lite/nnacl/fp32/arithmetic_fp32.h deleted file mode 100644 index b329bf6908..0000000000 --- a/mindspore/lite/nnacl/fp32/arithmetic_fp32.h +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this 
file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_H_ -#define MINDSPORE_LITE_NNACL_ARITHMETIC_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/base/arithmetic_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/fp32/add_fp32.h" -#include "nnacl/fp32/mul_fp32.h" -#include "nnacl/fp32/div_fp32.h" -#include "nnacl/fp32/sub_fp32.h" -#include "nnacl/fp32/squared_difference.h" - -#ifdef __cplusplus -extern "C" { -#endif -void TileOneDimensionFp32(const float *inData, float *outData, int dim, size_t ndim, const int *inShape, - const int *inStrides, const int *outStrides, const int *multiple); -void TileDimensionsFp32(const float *data0, const float *data1, float *tile_data0, float *tile_data1, - ArithmeticParameter *param); -/* logical and */ -int ElementLogicalAnd(const float *in0, const float *in1, float *out, int size); -int ElementLogicalAndInt(const int *in0, const int *in1, int *out, int size); -int ElementLogicalAndBool(const bool *in0, const bool *in1, bool *out, int size); - -/* logical or */ -int ElementLogicalOr(const float *in0, const float *in1, float *out, int size); -int ElementLogicalOrBool(const bool *in0, const bool *in1, bool *out, int size); - -/* max min */ -int ElementMaximum(const float *in0, const float *in1, float *out, int size); -int ElementMinimum(const float *in0, const float *in1, float *out, int size); -int ElementMaximumInt(const int *in0, const int *in1, int *out, int size); -int ElementMinimumInt(const int *input0, const int *input1, int 
*output, const int element_size); - -/* floor div */ -int ElementFloorDiv(const float *in0, const float *in1, float *out, int size); -int ElementFloorDivInt(const int *in0, const int *in1, int *out, int size); - -/* floor mod */ -int ElementFloorMod(const float *in0, const float *in1, float *out, int size); -int ElementFloorModInt(const int *in0, const int *in1, int *out, int size); - -/* mod */ -int ElementMod(const float *in0, const float *in1, float *out, int size); -int ElementModInt(const int *in0, const int *in1, int *out, int size); -int ElementOptMod(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptModInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_H_ diff --git a/mindspore/lite/nnacl/fp32/arithmetic_self_fp32.h b/mindspore/lite/nnacl/fp32/arithmetic_self_fp32.h deleted file mode 100644 index aefd434d6f..0000000000 --- a/mindspore/lite/nnacl/fp32/arithmetic_self_fp32.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_H_ -#define MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif -int ElementAbs(const float *input, float *output, const int element_size); - -int ElementCos(const float *input, float *output, const int element_size); - -int ElementLog(const float *input, float *output, const int element_size); - -int ElementSquare(const float *input, float *output, const int element_size); - -int ElementSqrt(const float *input, float *output, const int element_size); - -int ElementRsqrt(const float *input, float *output, const int element_size); - -int ElementSin(const float *input, float *output, const int element_size); - -int ElementLogicalNot(const float *input, float *output, const int element_size); - -int ElementLogicalNotBool(const bool *input, bool *output, const int element_size); - -int ElementRound(const float *input, float *output, const int element_size); - -int ElementFloor(const float *input, float *output, const int element_size); - -int ElementCeil(const float *input, float *output, const int number); - -int ElementNegative(const float *input, float *output, const int element_size); - -int ElementReciprocal(const float *input, float *output, const int element_size); - -int ElementErf(const float *input, float *output, const int element_size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_SELF_H_ diff --git a/mindspore/lite/nnacl/fp32/batchnorm_fp32.h b/mindspore/lite/nnacl/fp32/batchnorm_fp32.h deleted file mode 100644 index 043c7c1335..0000000000 --- a/mindspore/lite/nnacl/fp32/batchnorm_fp32.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_BATCHNORM_H_ -#define MINDSPORE_LITE_NNACL_FP32_BATCHNORM_H_ - -#include "nnacl/batchnorm_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void BatchNormFp32(const void *input, const void *mean, const void *variance, const BatchNormParameter *param, - int task_id, void *output); -void FusedBatchNormFp32(const void *input, const void *scale, const void *offset, const void *mean, - const void *variance, const BatchNormParameter *param, int task_id, void *output); - -void FusedBatchNormFp32MeanVar(const float *input, float *run_mean, float *run_var, const BatchNormParameter *param, - float *save_mean, float *save_var); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FUSED_BATCHNORM_H_ diff --git a/mindspore/lite/nnacl/fp32/broadcast_to_fp32.h b/mindspore/lite/nnacl/fp32/broadcast_to_fp32.h deleted file mode 100644 index 5c354be667..0000000000 --- a/mindspore/lite/nnacl/fp32/broadcast_to_fp32.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_FP32_H_ - -#include "nnacl/op_base.h" -#include "nnacl/broadcast_to_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int BroadcastTo(const float *input, BroadcastShapeInfo *shape_info, float *output); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_BROADCAST_TO_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/common_func_fp32.h b/mindspore/lite/nnacl/fp32/common_func_fp32.h deleted file mode 100644 index fc6f75ebe9..0000000000 --- a/mindspore/lite/nnacl/fp32/common_func_fp32.h +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_COMMON_FUNC_H_ -#define MINDSPORE_LITE_NNACL_FP32_COMMON_FUNC_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/conv_parameter.h" - -typedef struct ConvDwFp32BorderParam { - float *dst; - const float *src; - const float *weight; - const float *bias; - size_t height; - size_t width; - size_t in_kh_step; - size_t in_kw_step; - size_t kernel_w; - size_t relu; - size_t relu6; -} ConvDwFp32BorderParam; - -#ifdef __cplusplus -extern "C" { -#endif - -void PostConvFuncFp32C8(const float *c8_out_ptr, float *out_ptr, const float *bias_ptr, size_t output_channel, - size_t plane_size, size_t stride, size_t relu_type); -void PostConvFuncFp32C4(const float *c4_out_ptr, float *out_ptr, const float *bias_ptr, size_t output_channel, - size_t plane_size, size_t plane_stride, size_t relu_type); - -void WinogradTransLeft(const float *S, const float *B, float *M, size_t w, size_t h, size_t k, size_t length); -void WinogradTransRight(const float *S, const float *B, float *M, size_t w, size_t h, size_t k, size_t length); - -#if defined(ENABLE_ARM) || defined(ENABLE_SSE) -void ConvDwFp32Center(float *dst, const float *src, const float *weight, const float *bias, size_t height, size_t width, - size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, - size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, size_t relu, size_t relu6); -#ifdef ENABLE_AVX -void ConvDwFp32Border(ConvDwFp32BorderParam *param); -#else -void ConvDwFp32Border(float *dst, const float *src, const float *weight, const float *bias, size_t height, size_t width, - size_t in_kh_step, size_t in_kw_step, size_t kernel_w, size_t relu, size_t relu6); -#endif -void DeconvDwFp32Center(float *dst, const float *src, const float *weight, size_t height, size_t width, size_t kernel_h, - size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, size_t in_sw_step, - size_t in_kh_step, size_t in_kw_step); -void 
PostFuncBiasReluC8(float *dst, const float *src, const float *bias, size_t oc8div, size_t oc8mod, - size_t plane_size, size_t stride, size_t relu_type); -void ConvDwFp32Row(float *output_ptr, const float *input_ptr, const float *weight_ptr, size_t num_pixels, - size_t output_channel, size_t input_step); -void PostFuncBiasReluC4(float *dst, const float *src, const float *bias, size_t oc4div, size_t oc4mod, - size_t plane_size, size_t plane_stride, size_t relu_type); -#endif - -#ifdef ENABLE_ARM64 -void BiasAdd(const float *bias, float *data, size_t oc4, size_t plan_size); -void BiasAddRelu6(const float *bias, float *data, size_t oc4, size_t plan_size); -void BiasAddRelu(const float *bias, float *data, size_t oc4, size_t plan_size); -void Relu6(float *data, size_t element4); -void Relu(float *data, size_t element4); - -void DeconvDwFp32Border(float *dst, const float *src, const float *weight, size_t height, size_t width, - size_t in_kh_step, size_t in_kw_step, size_t kernel_w); - -void ConvSwFp32Center(float *dst, const float *src, const float *weight, const float *bias, size_t height, size_t width, - size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t ic4, - size_t in_sh_step, size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, size_t relu, - size_t relu6); - -void ConvDw3x3Stride1(float *output, const float *buffer, const float *weight, const float *bias, int col_size, - int row_size, int channel, int output_h, int output_w, size_t relu, size_t relu6); - -void ConvDw3x3Stride2(float *output, const float *buffer, const float *weight, const float *bias, int col_size, - int row_size, int channel, int output_h, int output_w, size_t relu, size_t relu6); - -void ConvDw3x3Corner(float *dst, const float *src, const float *weight, const float *bias, int in_kh_step, - int in_kw_step, int channel, size_t relu, size_t relu6); - -void ConvDw3x3Vertical(float *dst, const float *src, const float *weight, const float *bias, int in_kh_step, - int 
in_kw_step, int channel, size_t relu, size_t relu6); - -void ConvDw3x3Horizontal(float *dst, const float *src, const float *weight, const float *bias, int in_kh_step, - int in_kw_step, int channel, size_t relu, size_t relu6); -#endif - -#ifdef __cplusplus -} -#endif -#endif /* MINDSPORE_LITE_NNACL_FP32_COMMON_FUNC_H_ */ diff --git a/mindspore/lite/nnacl/fp32/constant_of_shape_fp32.h b/mindspore/lite/nnacl/fp32/constant_of_shape_fp32.h deleted file mode 100644 index 672d30a5e5..0000000000 --- a/mindspore/lite/nnacl/fp32/constant_of_shape_fp32.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_CONSTANT_OF_SHAPE_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_CONSTANT_OF_SHAPE_FP32_H_ -#include -#include -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/constant_of_shape_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -inline int ConstantOfShapeInt32(int32_t *output, int start, int end, int32_t value) { - for (int i = start; i < end; i++) { - output[i] = value; - } - return NNACL_OK; -} - -inline int ConstantOfShapeFp32(float *output, int start, int end, float value) { - for (int i = start; i < end; i++) { - output[i] = value; - } - return NNACL_OK; -} - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_CONSTANT_OF_SHAPE_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/conv_common_fp32.h b/mindspore/lite/nnacl/fp32/conv_common_fp32.h deleted file mode 100644 index fe5bb1b9d1..0000000000 --- a/mindspore/lite/nnacl/fp32/conv_common_fp32.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_CONV_COMMON_H_ -#define MINDSPORE_LITE_NNACL_FP32_CONV_COMMON_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/pack.h" -#include "nnacl/op_base.h" -#include "nnacl/common_func.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// fp32 convolution common (im2col+gemm) -void ConvFp32(const float *input_data, float *packed_input, const float *packed_weight, const float *bias_data, - float *col_major_input, float *output_data, int task_id, const ConvParameter *conv_param); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_CONV_COMMON_H_ diff --git a/mindspore/lite/nnacl/fp32/conv_depthwise_fp32.h b/mindspore/lite/nnacl/fp32/conv_depthwise_fp32.h deleted file mode 100644 index 26ba01e895..0000000000 --- a/mindspore/lite/nnacl/fp32/conv_depthwise_fp32.h +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_CONV_DEPTHWISE_H_ -#define MINDSPORE_LITE_NNACL_FP32_CONV_DEPTHWISE_H_ - -#include "nnacl/conv_parameter.h" - -#ifndef ENABLE_ARM64 -void DepthwiseCenter(float *dst, const float *src, const float *weight, const float *bias, int height, int width, - int kernel_h, int kernel_w, int out_h_step, int block_channel, int in_sh_step, int in_sw_step, - int in_kh_step, int in_kw_step, bool is_relu, bool is_relu6); -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -void ConvDw(float *output_data, const float *input_data, const float *weight_data, const float *bias_data, - const ConvParameter *conv_param, int task_id); - -void InitSlidingParam(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); - -void InitSlidingParamConv(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); - -void AppendSlidingParamConv(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); - -void InitSlidingParamConvDw(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); - -void AppendSlidingParamConvDw(SlidingWindowParam *sliding, const ConvParameter *conv_param, int block); - -void ConvDwSWFp32(float *output_data, const float *input_data, const float *weight_data, const float *bias_data, - const ConvParameter *conv_param, const SlidingWindowParam *sliding, int task_id); - -bool CheckConvDwUse3X3(const ConvParameter *conv_param); - -bool CheckConvDwUseIndirectBuffer(const ConvParameter *conv_param); - -void ConvDwInitIndirection(float **indirect_buffer, float *src, float *zero_ptr, const ConvParameter *conv_param, - int step_h, int step_w); - -#ifdef ENABLE_ARM64 -void ConvDwFp32Indirect3x3(float *output, float **input, const float *weights, const float *bias, int channels, - int output_width, size_t input_stride, size_t relu, size_t relu6); - -void ConvDwFp32Indirect5x5(float *output, float **input, const float *weights, const float *bias, int channels, - int output_width, 
size_t input_stride, size_t relu, size_t relu6); -#endif - -#ifdef ENABLE_AVX -void ConvDwFp32Avx3x3(float *output, float **input, const float *weights, const float *bias, size_t channels, - size_t output_width, size_t input_stride, size_t relu, size_t relu6); - -void ConvDwFp32Avx5x5(float *output, float **input, const float *weights, const float *bias, size_t channels, - size_t output_width, size_t input_stride, size_t relu, size_t relu6); -#endif - -#if defined(ENABLE_ARM) || (defined(ENABLE_SSE) && !defined(ENABLE_AVX)) -void ConvDw3x3(float *output_data, float *buffer, const float *input_data, const float *weight_data, - const float *bias_data, const ConvParameter *conv_param, int start_oh, int end_oh); - -bool CheckConvDw1DWinograd(const ConvParameter *conv_param, int thread_num); -#endif - -void ConvDwFp32IndirectRow(float *output, float **input, const float *weights, const float *bias, int channels, - int output_width, int input_stride, bool relu, bool relu6, int kernel); - -void ConvDwIndirection(float *output_data, float **indirect_buffer, const float *weight_data, const float *bias_data, - float *zero_ptr, const ConvParameter *conv_param, int task_id); - -void DeconvDwSWFp32(float *output_data, const float *input_data, const float *weight_data, const float *bias_data, - const ConvParameter *conv_param, const SlidingWindowParam *sliding, int task_id); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_CONV_DEPTHWISE_H_ diff --git a/mindspore/lite/nnacl/fp32/conv_winograd_fp32.h b/mindspore/lite/nnacl/fp32/conv_winograd_fp32.h deleted file mode 100644 index c4cee273f0..0000000000 --- a/mindspore/lite/nnacl/fp32/conv_winograd_fp32.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_CONV_WINOGRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_CONV_WINOGRAD_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/pack.h" -#include "nnacl/op_base.h" -#include "nnacl/common_func.h" -#include "nnacl/conv_parameter.h" -#include "nnacl/fp32/winograd_utils.h" -#include "nnacl/fp32/conv_depthwise_fp32.h" - -typedef float *TmpBufferAddress; - -#ifdef __cplusplus -extern "C" { -#endif - -// fp32 convolution winograd -void ConvWinogardFp32(const float *input_data, const float *trans_weight, const float *bias_data, float *output_data, - TmpBufferAddress *buffer_list, int task_id, const ConvParameter *conv_param, - InputTransFunc in_func, OutputTransFunc out_func); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_CONV_WINOGRAD_H_ diff --git a/mindspore/lite/nnacl/fp32/crop_fp32.h b/mindspore/lite/nnacl/fp32/crop_fp32.h deleted file mode 100644 index 793c7ab8d8..0000000000 --- a/mindspore/lite/nnacl/fp32/crop_fp32.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP32_CROP_H_ -#define MINDSPORE_LITE_NNACL_FP32_CROP_H_ -#include "nnacl/op_base.h" -#include "nnacl/crop_parameter.h" - -#define CROP_OFFSET_MAX_SIZE 4 - -#ifdef __cplusplus -extern "C" { -#endif -void Crop4D(const float *input, float *output, const int *in_shape, const int *out_shape, - const CropParameter *crop_param, int thread_id); -void Crop4DNoParallel(const float *input, float *output, const int *in_shape, const int *out_shape, - const CropParameter *crop_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_CROP_H_ diff --git a/mindspore/lite/nnacl/fp32/deconv_fp32.h b/mindspore/lite/nnacl/fp32/deconv_fp32.h deleted file mode 100644 index 2cd7ea3b02..0000000000 --- a/mindspore/lite/nnacl/fp32/deconv_fp32.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_DECONV_H_ -#define MINDSPORE_LITE_NNACL_FP32_DECONV_H_ - -#include -#include "nnacl/pack.h" -#include "nnacl/op_base.h" -#include "nnacl/conv_parameter.h" -#include "nnacl/errorcode.h" -#include "nnacl/fp32/common_func_fp32.h" -#include "nnacl/base/minimal_filtering_generator.h" - -#ifdef __cplusplus -extern "C" { -#endif -void PackDeConvWeightFp32(const float *weight, float *dst, int input_channel, int output_channel, int plane); -void DeConvPostFp32C8(const float *src, float *tmp_out, const float *bias, float *dst, int output_channel, - const ConvParameter *conv_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_DECONV_H_ diff --git a/mindspore/lite/nnacl/fp32/deconv_winograd_fp32.h b/mindspore/lite/nnacl/fp32/deconv_winograd_fp32.h deleted file mode 100644 index a2056dd36d..0000000000 --- a/mindspore/lite/nnacl/fp32/deconv_winograd_fp32.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_DECONV_WINOGRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_DECONV_WINOGRAD_H_ - -#include -#include "nnacl/pack.h" -#include "nnacl/op_base.h" -#include "nnacl/conv_parameter.h" -#include "nnacl/errorcode.h" -#include "nnacl/fp32/common_func_fp32.h" -#include "nnacl/base/minimal_filtering_generator.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int PackDeConvWgDataFp32(const float *nhwc_weight, DeConvComputeUnit *unit, const ConvParameter *conv_param, - const DeConvParam *deconv_param); -void DeconvWg(const float *nhwc_input_, float *tile_in, float *tile_out, int start_index, int calculate_count, - const ConvParameter *conv_param, DeConvParam *deconv_param, int task_id); -void DeconvWgPost(const float *tile_out, float *nc4hw4_output, const ConvParameter *conv_param, - const DeConvParam *deconv_param, int calculate_count, int tile_index); -void TiledC4MatmulFp32(float *dst, const float *src, const float *weight, size_t ic4, size_t cal_num, size_t oc4); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_DECONV_WINOGRAD_H_ diff --git a/mindspore/lite/nnacl/fp32/detection_post_process_fp32.h b/mindspore/lite/nnacl/fp32/detection_post_process_fp32.h deleted file mode 100644 index 60b1b0a469..0000000000 --- a/mindspore/lite/nnacl/fp32/detection_post_process_fp32.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_DETECTION_POST_PROCESS_H_ -#define MINDSPORE_LITE_NNACL_FP32_DETECTION_POST_PROCESS_H_ - -#include "nnacl/op_base.h" -#include "nnacl/detection_post_process_parameter.h" - -typedef struct { - float y; - float x; - float h; - float w; -} BboxCenter; - -typedef struct { - float ymin; - float xmin; - float ymax; - float xmax; -} BboxCorner; - -#ifdef __cplusplus -extern "C" { -#endif -int DecodeBoxes(int num_boxes, const float *input_boxes, const float *anchors, - const DetectionPostProcessParameter *param); - -int NmsMultiClassesFastCore(const int num_boxes, const int num_classes_with_bg, const float *input_scores, - void (*)(const float *, int *, int, int), const DetectionPostProcessParameter *param, - const int task_id, const int thread_num); - -int DetectionPostProcessFast(const int num_boxes, const int num_classes_with_bg, const float *input_scores, - const float *decoded_boxes, float *output_boxes, float *output_classes, - float *output_scores, float *output_num, void (*)(const float *, int *, int, int), - const DetectionPostProcessParameter *param); - -int DetectionPostProcessRegular(const int num_boxes, const int num_classes_with_bg, const float *input_scores, - float *output_boxes, float *output_classes, float *output_scores, float *output_num, - void (*)(const float *, int *, int, int), const DetectionPostProcessParameter *param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_DETECTION_POST_PROCESS_H_ diff --git a/mindspore/lite/nnacl/fp32/div_fp32.h b/mindspore/lite/nnacl/fp32/div_fp32.h deleted file mode 100644 index 755d678d55..0000000000 --- a/mindspore/lite/nnacl/fp32/div_fp32.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP32_DIV_H_ -#define MINDSPORE_LITE_NNACL_FP32_DIV_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/base/arithmetic_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif -int ElementDiv(const float *in0, const float *in1, float *out, int size); -int ElementDivRelu(const float *in0, const float *in1, float *out, int size); -int ElementDivRelu6(const float *in0, const float *in1, float *out, int size); -int ElementOptDiv(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptDivRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptDivRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptDivInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); -int BroadcastDiv(const float *in0, const float *in1, float *tile_in0, float *tile_in1, float *out, int size, - ArithmeticParameter *param); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_DIV_H_ diff --git a/mindspore/lite/nnacl/fp32/elu_fp32.h b/mindspore/lite/nnacl/fp32/elu_fp32.h deleted file mode 100644 index 2881aefa0c..0000000000 --- a/mindspore/lite/nnacl/fp32/elu_fp32.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance 
with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_ELU_H_ -#define MINDSPORE_LITE_NNACL_FP32_ELU_H_ - -#include "nnacl/op_base.h" - -typedef struct EluParameter { - OpParameter op_parameter_; - // primitive parameter - float alpha_; - - // shape correlative - int in_size_; -} EluParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int Elu(const float *input_data, float *output_data, const EluParameter *parameter, int task_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_ELU_H_ diff --git a/mindspore/lite/nnacl/fp32/embedding_lookup_fp32.h b/mindspore/lite/nnacl/fp32/embedding_lookup_fp32.h deleted file mode 100644 index a6e655d841..0000000000 --- a/mindspore/lite/nnacl/fp32/embedding_lookup_fp32.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_EMBEDDING_LOOKUP_H_ -#define MINDSPORE_LITE_NNACL_FP32_EMBEDDING_LOOKUP_H_ - -#include "nnacl/op_base.h" - -typedef struct EmbeddingLookupParameter { - OpParameter op_parameter_; - // primitive parameter - float max_norm_; - - // shape correlative - bool *is_regulated_; - int ids_size_; - int layer_size_; - int layer_num_; -} EmbeddingLookupParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int EmbeddingLookup(float *input_data, const int *ids, float *output_data, const EmbeddingLookupParameter *parameter, - int task_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_EMBEDDING_LOOKUP_H_ diff --git a/mindspore/lite/nnacl/fp32/exp_fp32.h b/mindspore/lite/nnacl/fp32/exp_fp32.h deleted file mode 100644 index 061a4d8a57..0000000000 --- a/mindspore/lite/nnacl/fp32/exp_fp32.h +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_EXP_H_ -#define MINDSPORE_LITE_NNACL_FP32_EXP_H_ - -#include "nnacl/op_base.h" - -typedef struct ExpParameter { - // Primitive parameter - OpParameter op_parameter_; - float base_; - float scale_; - float shift_; - // other parameter - int thread_num_; - float in_scale_; - float out_scale_; - int element_num_; -} ExpParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int Exp(const float *input_data, float *output_data, const ExpParameter *parameter, int task_id); -void ExpFp32(const float *src, float *dst, int num); - -#if defined(ENABLE_ARM) || defined(ENABLE_SSE) -static inline void simd_exp(MS_FLOAT32X4 input, float *dst) { - static MS_FLOAT32X4 maxv = {88.0f, 88.0f, 88.0f, 88.0f}; - static MS_FLOAT32X4 minv = {-88.0f, -88.0f, -88.0f, -88.0f}; - static MS_FLOAT32X4 param[] = {{0.693147f, 0.693147f, 0.693147f, 0.693147f}, - {1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120}, - {1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24}, - {1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6}, - {0.5f, 0.5f, 0.5f, 0.5f}, - {1.0f, 1.0f, 1.0f, 1.0f}}; - - input = MS_MAXQ_F32(minv, MS_MINQ_F32(input, maxv)); - MS_INT32X4 integer = MS_CVTQPS_EPI32(input / param[0]); - MS_FLOAT32X4 decimal = input - MS_CVTQEPI32_PS(integer) * param[0]; - MS_INT32X4 int_exp = MS_SLLIQ_EPI32(MS_ADDQ_EPI32(integer, MS_MOVQ_EPI32(127)), 23); - MS_FLOAT32X4 decimal_exp = - param[5] + - decimal * (param[5] + decimal * (param[4] + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); - MS_STQ_F32(dst, decimal_exp * MS_CAST_F32_S32(int_exp)); -} -#endif - -#if defined(ENABLE_AVX) -static inline void simd_exp_avx(MS_FLOAT32X8 input, float *dst) { - static MS_FLOAT32X8 maxv = {88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f, 88.0f}; - static MS_FLOAT32X8 minv = {-88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f, -88.0f}; - static MS_FLOAT32X8 param[] = { - {0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f, 0.693147f}, - {1.0f / 120, 
1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120, 1.0f / 120}, - {1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24, 1.0f / 24}, - {1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6, 1.0f / 6}, - {0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f}, - {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}; - input = MS_MAX256_F32(minv, MS_MIN256_F32(input, maxv)); - MS_INT32X8 integer = MS_CVT256PS_EPI32(input / param[0]); - MS_FLOAT32X8 decimal = input - MS_CVT256EPI32_PS(integer) * param[0]; - MS_INT32X8 int_exp = MS_SLLI256_EPI32(MS_ADD256_EPI32(integer, MS_MOV256_EPI32(127)), 23); - MS_FLOAT32X8 decimal_exp = - param[5] + - decimal * (param[5] + decimal * (param[4] + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); - MS_ST256_F32(dst, decimal_exp * MS_CAST256_F32_S32(int_exp)); -} -#endif - -static inline void single_exp(float src, float *dst) { - typedef union { - float f; - int i; - } fi; - static float param[] = {0.693147f, 1.0f / 120, 1.0f / 24, 1.0f / 6, 1.0f / 2, 1.0f}; // log(2.0f) - src = MSMAX(-88.0f, MSMIN(88.0f, src)); - int integer = src / param[0]; - float decimal = src - integer * param[0]; - fi int_exp = {.i = (integer + 127) << 23}; - float decimal_exp = - 1.0f + decimal * (1.0f + decimal * (0.5f + decimal * (param[3] + decimal * (param[2] + decimal * param[1])))); - *dst = int_exp.f * decimal_exp; -} -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_EXP_H_ diff --git a/mindspore/lite/nnacl/fp32/gatherNd_fp32.h b/mindspore/lite/nnacl/fp32/gatherNd_fp32.h deleted file mode 100644 index ec4f4c4da3..0000000000 --- a/mindspore/lite/nnacl/fp32/gatherNd_fp32.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_GATHERND_H_ -#define MINDSPORE_LITE_NNACL_GATHERND_H_ - -#include "nnacl/op_base.h" - -typedef struct GatherNdParameter { - // Primitive parameter - OpParameter op_parameter_; -} GatherNdParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int GatherNd(const float *input, float *output, const int *in_offset, int area, int count); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_GATHERND_H_ diff --git a/mindspore/lite/nnacl/fp32/gru_fp32.h b/mindspore/lite/nnacl/fp32/gru_fp32.h deleted file mode 100644 index 3333eafd9c..0000000000 --- a/mindspore/lite/nnacl/fp32/gru_fp32.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_GRU_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRU_FP32_H_ -#include "nnacl/gru_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Gru(float *output, const float *input, const float *weight_g, const float *weight_r, const float *input_bias, - const float *state_bias, float *hidden_state, float *buffer[4], int check_seq_len, - const GruParameter *gru_parm); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRU_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/instance_norm_fp32.h b/mindspore/lite/nnacl/fp32/instance_norm_fp32.h deleted file mode 100644 index b00b7491f9..0000000000 --- a/mindspore/lite/nnacl/fp32/instance_norm_fp32.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_INSTANCE_NORM_H_ -#define MINDSPORE_LITE_NNACL_FP32_INSTANCE_NORM_H_ - -#include "nnacl/op_base.h" -#include "nnacl/instance_norm_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int InstanceNorm(const float *src_data, float *dst_data, const float *gamma_data, const float *beta_data, - const InstanceNormParameter *param, size_t task_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_INSTANCE_NORM_H_ diff --git a/mindspore/lite/nnacl/fp32/invert_permutation_fp32.h b/mindspore/lite/nnacl/fp32/invert_permutation_fp32.h deleted file mode 100644 index 7151cdfddc..0000000000 --- a/mindspore/lite/nnacl/fp32/invert_permutation_fp32.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_FP32_H_ -#define MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_FP32_H_ - -#ifdef __cplusplus -extern "C" { -#endif -void InvertPermutation(const int *input, int *output, int num); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/l2_norm_fp32.h b/mindspore/lite/nnacl/fp32/l2_norm_fp32.h deleted file mode 100644 index 4771d382f1..0000000000 --- a/mindspore/lite/nnacl/fp32/l2_norm_fp32.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_L2NORM_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_L2NORM_FP32_H_ - -#include "nnacl/l2_norm_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int CalcThreadSquareSum(const float *input_ptr, float *sum, int begin, int end); -int ThreadDivSqrtSum(const float *input_ptr, float *output_ptr, const L2NormParameter *param, const float sqrt_sum, - const int begin, const int end); -int ThreadTrailingAxis(const float *input_ptr, float *output_ptr, const L2NormParameter *param, const int begin, - const int end); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_L2NORM_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/layer_norm_fp32.h b/mindspore/lite/nnacl/fp32/layer_norm_fp32.h deleted file mode 100644 index 3849fa125b..0000000000 --- a/mindspore/lite/nnacl/fp32/layer_norm_fp32.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_LAYER_NORM_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_LAYER_NORM_FP32_H_ - -#include "nnacl/op_base.h" -#include "nnacl/layer_norm_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int LayerNorm(const float *src_data, const float *gamma_data, const float *beta_data, float *dst_data, float *out_mean, - float *out_deno, LayerNormParameter *param, size_t task_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_LAYER_NORM_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/local_response_norm_fp32.h b/mindspore/lite/nnacl/fp32/local_response_norm_fp32.h deleted file mode 100644 index 57046bb32d..0000000000 --- a/mindspore/lite/nnacl/fp32/local_response_norm_fp32.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_LOCAL_RESPONSE_NORM_H_ -#define MINDSPORE_LITE_NNACL_LOCAL_RESPONSE_NORM_H_ - -#include "nnacl/op_base.h" - -typedef struct LocalResponseNormParameter { - // Primitive parameter - OpParameter op_parameter_; - int depth_radius_; - float bias_; - float alpha_; - float beta_; -} LocalResponseNormParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int LocalResponseNorm(const float *input_ptr, int out_size, int channel, float *output_ptr, - const LocalResponseNormParameter *param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_LOCAL_RESPONSE_NORM_H_ diff --git a/mindspore/lite/nnacl/fp32/lstm_fp32.h b/mindspore/lite/nnacl/fp32/lstm_fp32.h deleted file mode 100644 index 8a9d8276dc..0000000000 --- a/mindspore/lite/nnacl/fp32/lstm_fp32.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_LSTM_H_ -#define MINDSPORE_LITE_NNACL_FP32_LSTM_H_ - -#include "nnacl/lstm_parameter.h" -#ifdef __cplusplus -extern "C" { -#endif -void PackLstmWeight(float *dst, const float *src, int batch, int deep, int col, int col_align); - -void PackLstmBias(float *dst, const float *src, int batch, int col, int col_align, bool is_bidirectional); - -void PackLstmInput(const float *src, float *dst, int row, int deep); - -void LstmMatMul(float *c, const float *a, const float *b, const float *bias, int row, int deep, int col, bool is_vec); - -void ElementMulAcc(const float *input0, const float *input1, float *output, int element_size); - -int ElementOptMulAcc(const float *input0, const float input1, float *output, const int element_size); - -void Lstm(float *output, const float *input, const float *weight_i, const float *weight_h, const float *input_bias, - const float *state_bias, float *hidden_state, float *cell_state, float *buffer[6], - const LstmParameter *lstm_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_LSTM_H_ diff --git a/mindspore/lite/nnacl/fp32/matmul_fp32.h b/mindspore/lite/nnacl/fp32/matmul_fp32.h deleted file mode 100644 index 4a629eeadd..0000000000 --- a/mindspore/lite/nnacl/fp32/matmul_fp32.h +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_MATMUL_H_ -#define MINDSPORE_LITE_NNACL_FP32_MATMUL_H_ - -#include -#include -#include "nnacl/errorcode.h" -#include "nnacl/matmul_parameter.h" -#include "nnacl/op_base.h" - -#define ADD_BIAS(value, bias, c) \ - if (bias != NULL) value = value + bias[c]; - -#define DO_RELU(value, act_type) \ - if (act_type == ActType_Relu) value = MSMAX(0.0f, value); - -#define DO_RELU6(value, act_type) \ - if (act_type == ActType_Relu6) value = MSMIN(6.0f, value); \ - if (act_type == ActType_Relu6) value = MSMAX(0.0f, value); - -#ifdef __cplusplus -extern "C" { -#endif -void MatMulOpt(const float *a, const float *b, float *c, const float *bias, ActType act_type, int deep, int row, - int col, size_t stride, int out_type); -void MatVecMulFp32(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int col); - -void RowMajor2ColMajor(const float *src_ptr, float *dst_ptr, int row, int col); -void RowMajor2Row4Major(const float *src_ptr, float *dst_ptr, int row, int col); -void RowMajor2Row6Major(const float *src_ptr, float *dst_ptr, int row, int col); -void RowMajor2Row8Major(const float *src_ptr, float *dst_ptr, int row, int col); -void RowMajor2Row12Major(const float *src_ptr, float *dst_ptr, int row, int col); -void RowMajor2Row16Major(const float *src_ptr, float *dst_ptr, int row, int col); -void RowMajor2Col4Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); -void RowMajor2Col6Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); -void RowMajor2Col8Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); -void RowMajor2Col12Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); -void RowMajor2Col16Major(const float *src_ptr, float *dst_ptr, size_t row, size_t col); - -#ifdef ENABLE_ARM64 -void MatmulFloatNeon64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, - int col, size_t stride, size_t writeNhwc, 
size_t WriteWino); -void MatmulFloatNeon64Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, - int col, size_t stride, size_t write_mode); -#elif ENABLE_ARM32 -void MatmulFloatNeon32(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, - int col, int stride, size_t writeNhwc, size_t WriteWino); -void MatmulFloatNeon32Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, - int col, int stride, int write_mode); -void MatmulFloatNeon32Opt12x4(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, - int row, int col, int stride, int write_mode); -#elif ENABLE_SSE -#include -void MatmulFloatSse64(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, - int col, int stride, size_t writeNhwc, size_t WriteWino); -void MatmulFloatSse64Opt(const float *a, const float *b, float *c, const float *bias, int act_type, int depth, int row, - int col, int stride, int write_mode); -#ifdef ENABLE_AVX -void MatmulFloatAvxOpt(const float *a, const float *b, float *c, const float *bias, size_t act_type, size_t depth, - size_t row, size_t col, size_t stride, size_t write_mode); -#endif -#endif - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_MATMUL_H_ diff --git a/mindspore/lite/nnacl/fp32/mul_fp32.h b/mindspore/lite/nnacl/fp32/mul_fp32.h deleted file mode 100644 index 0ff54a43c8..0000000000 --- a/mindspore/lite/nnacl/fp32/mul_fp32.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP32_MUL_H_ -#define MINDSPORE_LITE_NNACL_FP32_MUL_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/base/arithmetic_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ElementMul(const float *in0, const float *in1, float *out, int size); -int ElementMulRelu(const float *in0, const float *in1, float *out, int size); -int ElementMulRelu6(const float *in0, const float *in1, float *out, int size); -int ElementMulInt(const int *in0, const int *in1, int *out, int size); -int ElementMulReluInt(const int *in0, const int *in1, int *out, int size); -int ElementMulRelu6Int(const int *in0, const int *in1, int *out, int size); -int ElementOptMul(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptMulRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptMulRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptMulInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); -int ElementOptMulReluInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); -int ElementOptMulRelu6Int(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); -int BroadcastMul(const float *in0, const float *in1, float *tile_in0, float *tile_in1, float *out, int size, - ArithmeticParameter *param); - -#ifdef 
__cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_MUL_H_ diff --git a/mindspore/lite/nnacl/fp32/one_hot_fp32.h b/mindspore/lite/nnacl/fp32/one_hot_fp32.h deleted file mode 100644 index 7b2039bb08..0000000000 --- a/mindspore/lite/nnacl/fp32/one_hot_fp32.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_ONE_HOT_H_ -#define MINDSPORE_LITE_NNACL_FP32_ONE_HOT_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" - -typedef struct OneHotParameter { - // Primitive parameter - OpParameter op_parameter_; - int axis_; - // other parameter - int depth_; - float on_value_; - float off_value_; - int outer_size_; - int inner_size_; - bool support_neg_index_; // if true, support neg index in indices tensor; if false, set off_value on neg index. 
-} OneHotParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int OneHot(const int *indices, float *output, const OneHotParameter *one_hot_param, const int tid, - const int thread_num); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_ONE_HOT_H_ diff --git a/mindspore/lite/nnacl/fp32/pack_fp32.h b/mindspore/lite/nnacl/fp32/pack_fp32.h deleted file mode 100644 index 230f4eb957..0000000000 --- a/mindspore/lite/nnacl/fp32/pack_fp32.h +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2020-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_PACK_H_ -#define MINDSPORE_LITE_NNACL_FP32_PACK_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void PackHWCToWHC(const float *src, float *dst, int height, int width, int channel); -void PackNHWCToNC4HW4Fp32(const void *src, void *dst, int batch, int plane, int channel); -void PackNCHWToNC4HW4Fp32(const void *src, void *dst, int batch, int plane, int channel); -void PackNHWCToNHWC4Fp32(const void *src, void *dst, int batch, int plane, int channel); -void PackNHWCToNHWC8Fp32(const void *src, void *dst, int batch, int plane, int channel); -// Note: If not multithreaded, please set task_id = 0 and thread_count = 0; -void PackNHWCToNCHWFp32(const void *src, void *dst, int batch, int plane, int channel, int task_id, int thread_count); -void PackNCHWToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel, int task_id, int thread_count); -void PackNHWC4ToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel); -void PackNC4HW4ToNHWC4Fp32(const void *src, void *dst, int batch, int plane, int channel); -void PackNC4HW4ToNHWCFp32(const void *src, void *dst, int batch, int plane, int channel); -void PackNHWCToC8HWN8Fp32(const void *src, void *dst, int batch, int plane, int channel); - -void PackWeightKHWToHWKFp32(const void *src, void *dst, int plane, int channel); -void PackDepthwiseIndirectWeightC4Fp32(const void *src, void *dst, int height, int width, int channel); -void PackDepthwiseIndirectWeightC8Fp32(const void *src, void *dst, int height, int width, int channel); -void Im2ColPackUnitFp32(const float *input_data, const ConvParameter *conv_param, float *packed_input, int real_cal_num, - int block_index); - -#if defined(ENABLE_ARM) || (defined(ENABLE_SSE) && !defined(ENABLE_AVX)) -void PackWeightConvDw3x3Fp32(const void *src, void *dst, int channel); -#endif - -// Transpose 8X8 Fp32 block data -typedef void 
(*Transpose8X8Fp32Func)(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); -#ifdef ENABLE_ARM64 -void Transpose8X8Fp32Arm64(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); -#endif -#ifdef ENABLE_ARM32 -void Transpose8X8Fp32Arm32(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); -#endif -#ifdef ENABLE_AVX -void Transpose8X8Fp32Avx(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); -#endif -#if defined(ENABLE_SSE) && !defined(ENABLE_AVX) -void Transpose8X8Fp32Sse(const float *src_ptr, float *dst_ptr, int src_stride, int dst_stride); -#endif - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_PAD_H_ diff --git a/mindspore/lite/nnacl/fp32/pad_fp32.h b/mindspore/lite/nnacl/fp32/pad_fp32.h deleted file mode 100644 index db5876f51a..0000000000 --- a/mindspore/lite/nnacl/fp32/pad_fp32.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_PAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_PAD_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include -#include -#include "nnacl/op_base.h" -#include "nnacl/pad_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Pad(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - const int *paddings, int tid, int thread_num); -void MirrorPad(const float *input_data, float *output_data, const int *input_shape, const PadParameter *pad_param, - int begin, int end); - -int TransOut2InputDimIndex(int out_dim_index, int left_pad, int in_dim, int offset); -int GetInputFlattenIndex(int out_flatten_index, const int *input_shape, const PadParameter *pad_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_PAD_H_ diff --git a/mindspore/lite/nnacl/fp32/pooling_fp32.h b/mindspore/lite/nnacl/fp32/pooling_fp32.h deleted file mode 100644 index 71033e5072..0000000000 --- a/mindspore/lite/nnacl/fp32/pooling_fp32.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_POOLING_H_ -#define MINDSPORE_LITE_NNACL_FP32_POOLING_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/pooling_parameter.h" -#include "nnacl/int8/quantize.h" - -#ifdef __cplusplus -extern "C" { -#endif -int AvgPooling(const float *input_ptr, float *output_ptr, const PoolingParameter *pooling_param, int task_id, - float minf, float maxf); -void MaxPooling(const float *input_ptr, float *output_ptr, const PoolingParameter *pooling_param, int task_id, - float minf, float maxf); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_POOLING_H_ diff --git a/mindspore/lite/nnacl/fp32/power_fp32.h b/mindspore/lite/nnacl/fp32/power_fp32.h deleted file mode 100644 index 80e12e54ca..0000000000 --- a/mindspore/lite/nnacl/fp32/power_fp32.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2020-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_POWER_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_POWER_FP32_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/power_parameter.h" - -#if defined(ENABLE_ARM) || defined(ENABLE_AVX) || defined(ENABLE_SSE) -typedef MS_FLOAT32X4 (*PowerSimdFun)(MS_FLOAT32X4 x, const void *exponent); -#endif -typedef void (*PowerFun)(const float *, const float *, float *, int, float, float); -typedef float (*PowerScalarFun)(float x, const void *exponent); - -#ifdef __cplusplus -extern "C" { -#endif -static inline bool CheckInteger(float f) { return floorf(f) == f; } - -static inline float StdPowerScalar(float x, const void *exponent) { return powf(x, *(float *)exponent); } - -#if defined(ENABLE_ARM) || defined(ENABLE_AVX) || defined(ENABLE_SSE) -static inline MS_FLOAT32X4 StdPowerSimd(MS_FLOAT32X4 x, const void *exponent) { - MS_FLOAT32X4 result; - for (int i = 0; i < 4; ++i) { - result[i] = powf(x[i], *(float *)exponent); - } - return result; -} -#endif -int Power(const float *input, const float *exponent, float *output, int len, float scale, float shift, bool broadcast); -void PowerSingle(const float *input, const float *exponent, float *output, int len, float scale, float shift); -void PowerBroadCast(const float *input, const float *exponent, float *output, int len, float scale, float shift); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_POWER_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/prelu_fp32.h b/mindspore/lite/nnacl/fp32/prelu_fp32.h deleted file mode 100644 index 1203b2e9a0..0000000000 --- a/mindspore/lite/nnacl/fp32/prelu_fp32.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP32_PRELU_H_ -#define MINDSPORE_LITE_NNACL_FP32_PRELU_H_ - -#include "nnacl/op_base.h" -#include "nnacl/prelu_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void PRelu(const float *input, float *output, float *slope, int start, int end, int channel); - -void PReluShareChannel(const float *input, float *output, float slope, int start, int end); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_PRELU_H_ diff --git a/mindspore/lite/nnacl/fp32/prior_box_fp32.h b/mindspore/lite/nnacl/fp32/prior_box_fp32.h deleted file mode 100644 index d37ca65956..0000000000 --- a/mindspore/lite/nnacl/fp32/prior_box_fp32.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_PRIOR_BOX_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_PRIOR_BOX_FP32_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/prior_box_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -static int PriorBox(const float *input_data, float *output_data, const size_t size, const int tid, - const int thread_num) { - size_t unit_size = size / thread_num; - size_t copy_size = (tid == thread_num - 1) ? size - unit_size * tid : unit_size; - (void)memcpy(output_data + tid * unit_size, input_data + tid * unit_size, copy_size * sizeof(float)); - return NNACL_OK; -} -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_PRIOR_BOX_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/range_fp32.h b/mindspore/lite/nnacl/fp32/range_fp32.h deleted file mode 100644 index dfa49af0be..0000000000 --- a/mindspore/lite/nnacl/fp32/range_fp32.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_RANGE_H_ -#define MINDSPORE_LITE_NNACL_RANGE_H_ - -#include "nnacl/op_base.h" - -typedef struct RangeParameter { - // Primitive parameter - OpParameter op_parameter_; - int dType_; - int start_; - int limit_; - int delta_; -} RangeParameter; - -#ifdef __cplusplus -extern "C" { -#endif -inline void Range(float *output_ptr, float start, float delta, int nums) { - for (int i = 0; i < nums; ++i, start += delta) { - output_ptr[i] = start; - } -} - -inline void RangeInt(int *output_ptr, int start, int delta, int nums) { - for (int i = 0; i < nums; ++i, start += delta) { - output_ptr[i] = start; - } -} - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_RANGE_H_ diff --git a/mindspore/lite/nnacl/fp32/rank_fp32.h b/mindspore/lite/nnacl/fp32/rank_fp32.h deleted file mode 100644 index 7c132c4346..0000000000 --- a/mindspore/lite/nnacl/fp32/rank_fp32.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_RANK_H_ -#define MINDSPORE_LITE_NNACL_RANK_H_ - -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -inline void Rank(float *output, int rank) { - output[0] = (float)(rank); - return; -} -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_RANK_H_ diff --git a/mindspore/lite/nnacl/fp32/reduce_fp32.h b/mindspore/lite/nnacl/fp32/reduce_fp32.h deleted file mode 100644 index 30901f0622..0000000000 --- a/mindspore/lite/nnacl/fp32/reduce_fp32.h +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_REDUCE_H_ -#define MINDSPORE_LITE_NNACL_FP32_REDUCE_H_ -#include "nnacl/op_base.h" -#include "nnacl/reduce_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int ReduceMean(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, - int thread_num); -int IntReduceMean(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, - int thread_num); -int ReduceSum(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, - int thread_num); -int IntReduceSum(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, - int thread_num); -int ReduceMax(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, - int thread_num); -int IntReduceMax(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, - int thread_num); -int ReduceMin(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, - int thread_num); -int IntReduceMin(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, - int thread_num); -int ReduceProd(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, - int thread_num); -int IntReduceProd(int outer_size, int inner_size, int axis_size, const int *src_data, int *dst_data, int tid, - int thread_num); -int ReduceSumSquare(int outer_size, int inner_size, int axis_size, const float *src_data, float *dst_data, int tid, - int thread_num); -int ReduceAll(int outer_size, int inner_size, int axis_size, const bool *src_data, bool *dst_data, int tid, - int thread_num); - -#ifdef ENABLE_NNACL_INFER_SHAPE -int ReduceInferShape(int **in_shape, size_t *dim_size, int *out_shape, int *in_format, int *out_format, - int *in_datatype, int *out_datatype, OpParameter *param); -#endif -#ifdef __cplusplus -} 
-#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_REDUCE_H_ diff --git a/mindspore/lite/nnacl/fp32/resize_fp32.h b/mindspore/lite/nnacl/fp32/resize_fp32.h deleted file mode 100644 index 9a65712aea..0000000000 --- a/mindspore/lite/nnacl/fp32/resize_fp32.h +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP32_RESIZE_H_ -#define MINDSPORE_LITE_NNACL_FP32_RESIZE_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -typedef float (*CalculateOriginalCoordinate)(int x_resized, int length_original, int length_resized); - -int PrepareResizeBilinear(const int *input_shape, const int *output_shape, CalculateOriginalCoordinate calculate, - int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, float *y_bottom_weights, - float *x_left_weights); - -int PrepareResizeBicubic(const int *input_shape, const int *output_shape, CalculateOriginalCoordinate calculate, - int *y_tops, int *x_lefts, float *y_weights, float *x_weights, float cubic_coeff); - -int ResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - const int *y_bottoms, const int *y_tops, const int *x_lefts, const int *x_rights, - const float *y_bottom_weights, const float *x_left_weights, float *line0, float *line1, - const int h_begin, const int h_end); - -int 
ResizeBicubic(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - const int *y_tops, const int *x_lefts, const float *y_weights, const float *x_weights, - float *line_buffer, const int h_begin, const int h_end); - -int PrepareCropAndResizeBilinear(const int *input_shape, const float *boxes, const int *box_idx, - const int *output_shape, int *y_bottoms, int *y_tops, int *x_lefts, int *x_rights, - float *y_bottom_weights, float *x_left_weights); - -int CropAndResizeBilinear(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - const int *y_bottoms, const int *y_tops, const int *x_lefts, const int *x_rights, - const float *y_bottom_weights, const float *x_left_weights, float *line0, float *line1, - const int h_begin, const int h_end); - -int ResizeNearestNeighbor(const float *input_data, float *output_data, const int *input_shape, const int *output_shape, - CalculateOriginalCoordinate calculate, int coordinate_transform_mode, int tid, - int thread_num); - -float CalculateAsymmetric(int x_resized, int length_original, int length_resized); - -float CalculateAlignCorners(int x_resized, int length_original, int length_resized); - -float CalculateHalfPixel(int x_resized, int length_original, int length_resized); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_RESIZE_H_ diff --git a/mindspore/lite/nnacl/fp32/reverse_fp32.h b/mindspore/lite/nnacl/fp32/reverse_fp32.h deleted file mode 100644 index d8e58d17bd..0000000000 --- a/mindspore/lite/nnacl/fp32/reverse_fp32.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_REVERSE_H_ -#define MINDSPORE_LITE_NNACL_REVERSE_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#define REVERSE_SHAPE_MAX_SIZE 4 - -// For reverse. -typedef struct ReverseParameter { - OpParameter op_parameter_; - int axis_[REVERSE_SHAPE_MAX_SIZE]; - int num_axis_; -} ReverseParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int Reverse(const float *input, float *output, size_t elem_size, int *index); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_REVERSE_H_ diff --git a/mindspore/lite/nnacl/fp32/reverse_sequence_fp32.h b/mindspore/lite/nnacl/fp32/reverse_sequence_fp32.h deleted file mode 100644 index d0b18c39dd..0000000000 --- a/mindspore/lite/nnacl/fp32/reverse_sequence_fp32.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_REVERSE_SEQUENCE_H_ -#define MINDSPORE_LITE_NNACL_FP32_REVERSE_SEQUENCE_H_ - -#include -#include "nnacl/common_func.h" -#include "nnacl/op_base.h" -#include "nnacl/reverse_sequence_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void ReverseSequence(float *input0, const void *input1, float *output, ReverseSequenceParameter *para); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_REVERSE_SEQUENCE_H_ diff --git a/mindspore/lite/nnacl/fp32/roi_pooling_fp32.h b/mindspore/lite/nnacl/fp32/roi_pooling_fp32.h deleted file mode 100644 index 7e13f3d218..0000000000 --- a/mindspore/lite/nnacl/fp32/roi_pooling_fp32.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_ROI_POOLING_H_ -#define MINDSPORE_LITE_NNACL_FP32_ROI_POOLING_H_ - -#include "nnacl/op_base.h" - -typedef struct ROIPoolingParameter { - // primitive parameter - OpParameter op_parameter_; - int pooledW_; - int pooledH_; - float scale_; - - // shape correlative - int in_strides_[DIMENSION_4D]; - int out_strides_[DIMENSION_4D]; - int ndim_; - int input_w_; - int input_h_; - int input_n_; - int input_c_; - int output_w_; - int output_h_; - int output_n_; - int output_c_; - - // other parameter - int thread_num_; -} ROIPoolingParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int ROIPooling(const float *in_ptr, float *out_ptr, const float *roi, float *max_c, int tid, - const ROIPoolingParameter *param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_ROI_POOLING_H_ diff --git a/mindspore/lite/nnacl/fp32/scale_fp32.h b/mindspore/lite/nnacl/fp32/scale_fp32.h deleted file mode 100644 index bc1df65cb3..0000000000 --- a/mindspore/lite/nnacl/fp32/scale_fp32.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SCALE_FP32_H_ -#define MINDSPORE_LITE_NNACL_SCALE_FP32_H_ - -#include "nnacl/op_base.h" -#include "nnacl/scale.h" -#ifdef __cplusplus -extern "C" { -#endif -void DoScale(const float *in_data, float *out_data, const float *scale, const float *offset, int task_id, - const ScaleParameter *scale_param); -void DoScaleRelu(const float *in_data, float *out_data, const float *scale, const float *offset, int task_id, - const ScaleParameter *scale_param); -void DoScaleRelu6(const float *in_data, float *out_data, const float *scale, const float *offset, int task_id, - const ScaleParameter *scale_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_SCALE_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/scatter_nd_fp32.h b/mindspore/lite/nnacl/fp32/scatter_nd_fp32.h deleted file mode 100644 index fcae3a8d31..0000000000 --- a/mindspore/lite/nnacl/fp32/scatter_nd_fp32.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_SCATTER_ND_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_SCATTER_ND_FP32_H_ - -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -int DoScatterND(float *output_ptr, const float *update, int *output_unit_offsets, int unit_size, int num_units); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_SCATTER_ND_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/softmax_fp32.h b/mindspore/lite/nnacl/fp32/softmax_fp32.h deleted file mode 100644 index f8ec5ffeb3..0000000000 --- a/mindspore/lite/nnacl/fp32/softmax_fp32.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_SOFTMAX_H_ -#define MINDSPORE_LITE_NNACL_FP32_SOFTMAX_H_ - -#include "nnacl/op_base.h" -#include "nnacl/softmax_parameter.h" -#ifdef __cplusplus -extern "C" { -#endif -void Softmax(const float *input_ptr, float *output_ptr, float *sum_data, const SoftmaxParameter *parameter); -void SoftmaxLastAxis(const float *src, float *dst, int batch, int channel); -void SoftmaxNorm(const float *src, float *dst, int batch, int channel); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_SOFTMAX_H_ diff --git a/mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.h b/mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.h deleted file mode 100644 index 2be2695bc5..0000000000 --- a/mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_SPARSETODENSE_H_ -#define MINDSPORE_LITE_NNACL_FP32_SPARSETODENSE_H_ - -#include "nnacl/sparse_to_dense_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void SparseToDense(int **sparse_indices_vect, const int *output_shape, const float *sparse_values, float default_value, - float *output, bool isScalar, int index_start, int index_end, int out_width); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_SPARSETODENSE_H_ diff --git a/mindspore/lite/nnacl/fp32/splice_fp32.h b/mindspore/lite/nnacl/fp32/splice_fp32.h deleted file mode 100644 index 42db661670..0000000000 --- a/mindspore/lite/nnacl/fp32/splice_fp32.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_SPLICE_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_SPLICE_FP32_H_ -#include -#include "nnacl/splice_parameter.h" -#ifdef __cplusplus -extern "C" { -#endif - -void SpliceFp32(const float *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter, - float *dst_data, int dst_row, int dst_col); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP32_SPLICE_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/squared_difference.c b/mindspore/lite/nnacl/fp32/squared_difference.c deleted file mode 100644 index 0340329009..0000000000 --- a/mindspore/lite/nnacl/fp32/squared_difference.c +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SQUARED_DIFFERENCE_H_ -#define MINDSPORE_LITE_NNACL_SQUARED_DIFFERENCE_H_ - -#include "nnacl/fp32/squared_difference.h" -#include "nnacl/fp32/sub_fp32.h" -#include "nnacl/fp32/mul_fp32.h" - -int ElementSquaredDifference(const float *in0, const float *in1, float *out, int size) { - ElementSub(in0, in1, out, size); - return ElementMul(out, out, out, size); -} - -#endif // MINDSPORE_LITE_NNACL_SQUARED_DIFFERENCE_H_ diff --git a/mindspore/lite/nnacl/fp32/squared_difference.h b/mindspore/lite/nnacl/fp32/squared_difference.h deleted file mode 100644 index 71098b5525..0000000000 --- a/mindspore/lite/nnacl/fp32/squared_difference.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SQUARED_DIFFERENCE_H_ -#define MINDSPORE_LITE_NNACL_SQUARED_DIFFERENCE_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/base/arithmetic_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* Element Squared Difference */ -int ElementSquaredDifference(const float *in0, const float *in1, float *out, int size); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_SQUARED_DIFFERENCE_H_ diff --git a/mindspore/lite/nnacl/fp32/strided_slice_fp32.h b/mindspore/lite/nnacl/fp32/strided_slice_fp32.h deleted file mode 100644 index 12a0c31779..0000000000 --- a/mindspore/lite/nnacl/fp32/strided_slice_fp32.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_STRIDED_SLICE_FP32_H_ -#define MINDSPORE_LITE_NNACL_FP32_STRIDED_SLICE_FP32_H_ - -#include "nnacl/op_base.h" -#include "nnacl/strided_slice_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int DoStridedSlice(const void *inputs, void *output, StridedSliceParameter *param); - -void FastStride(const uint8_t *input, uint8_t *output, int split_len, int stride, size_t outer, size_t inner_size, - size_t in_offset); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_STRIDED_SLICE_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/sub_fp32.h b/mindspore/lite/nnacl/fp32/sub_fp32.h deleted file mode 100644 index b846417190..0000000000 --- a/mindspore/lite/nnacl/fp32/sub_fp32.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SUB_FP32_H_ -#define MINDSPORE_LITE_NNACL_SUB_FP32_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/base/arithmetic_base.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ElementSub(const float *in0, const float *in1, float *out, int size); -int ElementSubInt(const int *in0, const int *in1, int *out, int size); -int ElementSubRelu(const float *in0, const float *in1, float *out, int size); -int ElementSubRelu6(const float *in0, const float *in1, float *out, int size); -int ElementOptSub(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptSubRelu(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptSubRelu6(const float *in0, const float *in1, float *out, int size, const ArithmeticParameter *param); -int ElementOptSubInt(const int *in0, const int *in1, int *out, int size, const ArithmeticParameter *param); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_SUB_FP32_H_ diff --git a/mindspore/lite/nnacl/fp32/topk_fp32.h b/mindspore/lite/nnacl/fp32/topk_fp32.h deleted file mode 100644 index 64bfd2a242..0000000000 --- a/mindspore/lite/nnacl/fp32/topk_fp32.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_TOPK_H_ -#define MINDSPORE_LITE_NNACL_TOPK_H_ - -#include "nnacl/op_base.h" - -typedef struct TopkNode { - float element; - int32_t index; -} TopkNode; - -typedef struct TopkParameter { - // primitive parameter - OpParameter op_parameter_; - int k_; - bool sorted_; - - // other parameter - int last_dim_size_; - int loop_num_; - void *topk_node_list_; -} TopkParameter; - -#ifdef __cplusplus -extern "C" { -#endif -void Topk(float *input_data, float *output_data, int32_t *output_index, TopkParameter *parameter); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_TOPK_H_ diff --git a/mindspore/lite/nnacl/fp32/transpose_fp32.h b/mindspore/lite/nnacl/fp32/transpose_fp32.h deleted file mode 100644 index b3e6a5fb3b..0000000000 --- a/mindspore/lite/nnacl/fp32/transpose_fp32.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_TRANSPOSE_H_ -#define MINDSPORE_LITE_NNACL_FP32_TRANSPOSE_H_ - -#include -#include "nnacl/transpose.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int DoTransposeFp32(const float *in_data, float *out_data, const int *output_shape, TransposeParameter *param); -void TransposeDimsFp32(const float *in_data, float *out_data, const int *output_shape, int *size, int *position, - TransposeParameter *transpose_param, int task_id, int thread_num); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_TRANSPOSE_H_ diff --git a/mindspore/lite/nnacl/fp32/unique_fp32.h b/mindspore/lite/nnacl/fp32/unique_fp32.h deleted file mode 100644 index 95b89350c1..0000000000 --- a/mindspore/lite/nnacl/fp32/unique_fp32.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_UNIQUE_H -#define MINDSPORE_LITE_NNACL_UNIQUE_H - -#include "nnacl/op_base.h" - -typedef struct UniqueParameter { - // primitive parameter - OpParameter op_parameter_; -} UniqueParameter; - -#ifdef __cplusplus -extern "C" { -#endif -void Unique(const float *input, int input_len, float *output0, int *output0_len, int *output1); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_UNIQUE_H diff --git a/mindspore/lite/nnacl/fp32/where_fp32.h b/mindspore/lite/nnacl/fp32/where_fp32.h deleted file mode 100644 index 70f7d44217..0000000000 --- a/mindspore/lite/nnacl/fp32/where_fp32.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_WHERE_Fp32_H_ -#define MINDSPORE_LITE_NNACL_FP32_WHERE_Fp32_H_ - -#include "nnacl/op_base.h" -#include "nnacl/where_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void WhereWithTripleInputs(const bool *condition, const float *x, const float *y, float *output, - WhereParameter *where_param_, int task_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_WHERE_Fp32_H_ diff --git a/mindspore/lite/nnacl/fp32/winograd_transform.h b/mindspore/lite/nnacl/fp32/winograd_transform.h deleted file mode 100644 index 58da682215..0000000000 --- a/mindspore/lite/nnacl/fp32/winograd_transform.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_WINOGRAD_TRANSFORM_H_ -#define MINDSPORE_LITE_NNACL_WINOGRAD_TRANSFORM_H_ - -#ifdef ENABLE_ARM -#include -#endif -#include -#include "nnacl/pack.h" -#include "nnacl/fp32/winograd_utils.h" - -#ifdef __cplusplus -extern "C" { -#endif -// for fp32 winograd input/output transform -void WinogradInputTransform(const float *input_data, float *trans_input, float *tmp_data, int cal_num, - int out_tile_index, int out_w_block_num, const ConvParameter *conv_param, - InputTransFunc func); - -void WinogradOutputTransform(const float *gemm_out, float *out_data, const float *bias_data, int cal_num, - int out_tile_index, int output_unit_num, const ConvParameter *conv_param, - OutputTransFunc func); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_WINOGRAD_TRANSFORM_H_ diff --git a/mindspore/lite/nnacl/fp32/winograd_utils.h b/mindspore/lite/nnacl/fp32/winograd_utils.h deleted file mode 100644 index 22ed181a9a..0000000000 --- a/mindspore/lite/nnacl/fp32/winograd_utils.h +++ /dev/null @@ -1,316 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_WINOGRAD_UTILS_H_ -#define MINDSPORE_LITE_NNACL_WINOGRAD_UTILS_H_ - -#ifdef ENABLE_ARM -#include -#endif -#include "nnacl/conv_parameter.h" -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -typedef void (*InputTransFunc)(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); - -typedef void (*OutputTransFunc)(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); - -void GeneralInputTransformUnit(const float *src_data, float *dst_data, const float *matrix_b, const float *matrix_bt, - int src_step, int dst_step, int in_unit); - -void GeneralOutputTransformUnit(const float *src_data, float *dst_data, const float *bias_data, const float *matrix_a, - const float *matrix_at, int src_step, int dst_step, int in_unit, int out_unit); - -#define Load16Data \ - src[0] = MS_LDQ_F32(src_data + 0 * src_step); \ - src[1] = MS_LDQ_F32(src_data + 1 * src_step); \ - src[2] = MS_LDQ_F32(src_data + 2 * src_step); \ - src[3] = MS_LDQ_F32(src_data + 3 * src_step); \ - src[4] = MS_LDQ_F32(src_data + 4 * src_step); \ - src[5] = MS_LDQ_F32(src_data + 5 * src_step); \ - src[6] = MS_LDQ_F32(src_data + 6 * src_step); \ - src[7] = MS_LDQ_F32(src_data + 7 * src_step); \ - src[8] = MS_LDQ_F32(src_data + 8 * src_step); \ - src[9] = MS_LDQ_F32(src_data + 9 * src_step); \ - src[10] = MS_LDQ_F32(src_data + 10 * src_step); \ - src[11] = MS_LDQ_F32(src_data + 11 * src_step); \ - src[12] = MS_LDQ_F32(src_data + 12 * src_step); \ - src[13] = MS_LDQ_F32(src_data + 13 * src_step); \ - src[14] = MS_LDQ_F32(src_data + 14 * src_step); \ - src[15] = MS_LDQ_F32(src_data + 15 * src_step); - -#define Load36Data \ - src[0] = MS_LDQ_F32(src_data + 0 * src_step); \ - src[1] = MS_LDQ_F32(src_data + 1 * src_step); \ - src[2] = MS_LDQ_F32(src_data + 2 * src_step); \ - src[3] = MS_LDQ_F32(src_data + 3 * src_step); \ - src[4] = MS_LDQ_F32(src_data + 4 * 
src_step); \ - src[5] = MS_LDQ_F32(src_data + 5 * src_step); \ - src[6] = MS_LDQ_F32(src_data + 6 * src_step); \ - src[7] = MS_LDQ_F32(src_data + 7 * src_step); \ - src[8] = MS_LDQ_F32(src_data + 8 * src_step); \ - src[9] = MS_LDQ_F32(src_data + 9 * src_step); \ - src[10] = MS_LDQ_F32(src_data + 10 * src_step); \ - src[11] = MS_LDQ_F32(src_data + 11 * src_step); \ - src[12] = MS_LDQ_F32(src_data + 12 * src_step); \ - src[13] = MS_LDQ_F32(src_data + 13 * src_step); \ - src[14] = MS_LDQ_F32(src_data + 14 * src_step); \ - src[15] = MS_LDQ_F32(src_data + 15 * src_step); \ - src[16] = MS_LDQ_F32(src_data + 16 * src_step); \ - src[17] = MS_LDQ_F32(src_data + 17 * src_step); \ - src[18] = MS_LDQ_F32(src_data + 18 * src_step); \ - src[19] = MS_LDQ_F32(src_data + 19 * src_step); \ - src[20] = MS_LDQ_F32(src_data + 20 * src_step); \ - src[21] = MS_LDQ_F32(src_data + 21 * src_step); \ - src[22] = MS_LDQ_F32(src_data + 22 * src_step); \ - src[23] = MS_LDQ_F32(src_data + 23 * src_step); \ - src[24] = MS_LDQ_F32(src_data + 24 * src_step); \ - src[25] = MS_LDQ_F32(src_data + 25 * src_step); \ - src[26] = MS_LDQ_F32(src_data + 26 * src_step); \ - src[27] = MS_LDQ_F32(src_data + 27 * src_step); \ - src[28] = MS_LDQ_F32(src_data + 28 * src_step); \ - src[29] = MS_LDQ_F32(src_data + 29 * src_step); \ - src[30] = MS_LDQ_F32(src_data + 30 * src_step); \ - src[31] = MS_LDQ_F32(src_data + 31 * src_step); \ - src[32] = MS_LDQ_F32(src_data + 32 * src_step); \ - src[33] = MS_LDQ_F32(src_data + 33 * src_step); \ - src[34] = MS_LDQ_F32(src_data + 34 * src_step); \ - src[35] = MS_LDQ_F32(src_data + 35 * src_step); - -#define Load64Data \ - src[0] = MS_LDQ_F32(src_data + 0 * src_step); \ - src[1] = MS_LDQ_F32(src_data + 1 * src_step); \ - src[2] = MS_LDQ_F32(src_data + 2 * src_step); \ - src[3] = MS_LDQ_F32(src_data + 3 * src_step); \ - src[4] = MS_LDQ_F32(src_data + 4 * src_step); \ - src[5] = MS_LDQ_F32(src_data + 5 * src_step); \ - src[6] = MS_LDQ_F32(src_data + 6 * src_step); \ - src[7] = 
MS_LDQ_F32(src_data + 7 * src_step); \ - src[8] = MS_LDQ_F32(src_data + 8 * src_step); \ - src[9] = MS_LDQ_F32(src_data + 9 * src_step); \ - src[10] = MS_LDQ_F32(src_data + 10 * src_step); \ - src[11] = MS_LDQ_F32(src_data + 11 * src_step); \ - src[12] = MS_LDQ_F32(src_data + 12 * src_step); \ - src[13] = MS_LDQ_F32(src_data + 13 * src_step); \ - src[14] = MS_LDQ_F32(src_data + 14 * src_step); \ - src[15] = MS_LDQ_F32(src_data + 15 * src_step); \ - src[16] = MS_LDQ_F32(src_data + 16 * src_step); \ - src[17] = MS_LDQ_F32(src_data + 17 * src_step); \ - src[18] = MS_LDQ_F32(src_data + 18 * src_step); \ - src[19] = MS_LDQ_F32(src_data + 19 * src_step); \ - src[20] = MS_LDQ_F32(src_data + 20 * src_step); \ - src[21] = MS_LDQ_F32(src_data + 21 * src_step); \ - src[22] = MS_LDQ_F32(src_data + 22 * src_step); \ - src[23] = MS_LDQ_F32(src_data + 23 * src_step); \ - src[24] = MS_LDQ_F32(src_data + 24 * src_step); \ - src[25] = MS_LDQ_F32(src_data + 25 * src_step); \ - src[26] = MS_LDQ_F32(src_data + 26 * src_step); \ - src[27] = MS_LDQ_F32(src_data + 27 * src_step); \ - src[28] = MS_LDQ_F32(src_data + 28 * src_step); \ - src[29] = MS_LDQ_F32(src_data + 29 * src_step); \ - src[30] = MS_LDQ_F32(src_data + 30 * src_step); \ - src[31] = MS_LDQ_F32(src_data + 31 * src_step); \ - src[32] = MS_LDQ_F32(src_data + 32 * src_step); \ - src[33] = MS_LDQ_F32(src_data + 33 * src_step); \ - src[34] = MS_LDQ_F32(src_data + 34 * src_step); \ - src[35] = MS_LDQ_F32(src_data + 35 * src_step); \ - src[36] = MS_LDQ_F32(src_data + 36 * src_step); \ - src[37] = MS_LDQ_F32(src_data + 37 * src_step); \ - src[38] = MS_LDQ_F32(src_data + 38 * src_step); \ - src[39] = MS_LDQ_F32(src_data + 39 * src_step); \ - src[40] = MS_LDQ_F32(src_data + 40 * src_step); \ - src[41] = MS_LDQ_F32(src_data + 41 * src_step); \ - src[42] = MS_LDQ_F32(src_data + 42 * src_step); \ - src[43] = MS_LDQ_F32(src_data + 43 * src_step); \ - src[44] = MS_LDQ_F32(src_data + 44 * src_step); \ - src[45] = MS_LDQ_F32(src_data + 45 * 
src_step); \ - src[46] = MS_LDQ_F32(src_data + 46 * src_step); \ - src[47] = MS_LDQ_F32(src_data + 47 * src_step); \ - src[48] = MS_LDQ_F32(src_data + 48 * src_step); \ - src[49] = MS_LDQ_F32(src_data + 49 * src_step); \ - src[50] = MS_LDQ_F32(src_data + 50 * src_step); \ - src[51] = MS_LDQ_F32(src_data + 51 * src_step); \ - src[52] = MS_LDQ_F32(src_data + 52 * src_step); \ - src[53] = MS_LDQ_F32(src_data + 53 * src_step); \ - src[54] = MS_LDQ_F32(src_data + 54 * src_step); \ - src[55] = MS_LDQ_F32(src_data + 55 * src_step); \ - src[56] = MS_LDQ_F32(src_data + 56 * src_step); \ - src[57] = MS_LDQ_F32(src_data + 57 * src_step); \ - src[58] = MS_LDQ_F32(src_data + 58 * src_step); \ - src[59] = MS_LDQ_F32(src_data + 59 * src_step); \ - src[60] = MS_LDQ_F32(src_data + 60 * src_step); \ - src[61] = MS_LDQ_F32(src_data + 61 * src_step); \ - src[62] = MS_LDQ_F32(src_data + 62 * src_step); \ - src[63] = MS_LDQ_F32(src_data + 63 * src_step); - -InputTransFunc GetInputTransFunc(int input_unit); - -void InputTransform4x4Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); - -void InputTransform6x6Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); - -void InputTransform8x8Unit(const float *src_data, float *dst_data, int src_step, int dst_step, int real_c); - -OutputTransFunc GetOutputTransFunc(int input_unit, int output_unit, ActType act_type); - -#define Store4Data \ - MS_STQ_F32(dst_data, m[0]); \ - MS_STQ_F32(dst_data + out_c, m[1]); \ - MS_STQ_F32(dst_data + dst_step * out_c, m[2]); \ - MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[3]); - -#define Store9Data \ - MS_STQ_F32(dst_data, m[0]); \ - MS_STQ_F32(dst_data + out_c, m[1]); \ - MS_STQ_F32(dst_data + 2 * out_c, m[2]); \ - MS_STQ_F32(dst_data + dst_step * out_c, m[3]); \ - MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[4]); \ - MS_STQ_F32(dst_data + dst_step * out_c + 2 * out_c, m[5]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c, m[6]); \ - 
MS_STQ_F32(dst_data + 2 * dst_step * out_c + out_c, m[7]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c + 2 * out_c, m[8]); - -#define Store16Data \ - MS_STQ_F32(dst_data, m[0]); \ - MS_STQ_F32(dst_data + out_c, m[1]); \ - MS_STQ_F32(dst_data + 2 * out_c, m[2]); \ - MS_STQ_F32(dst_data + 3 * out_c, m[3]); \ - MS_STQ_F32(dst_data + dst_step * out_c, m[4]); \ - MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[5]); \ - MS_STQ_F32(dst_data + dst_step * out_c + 2 * out_c, m[6]); \ - MS_STQ_F32(dst_data + dst_step * out_c + 3 * out_c, m[7]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c, m[8]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c + out_c, m[9]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c + 2 * out_c, m[10]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c + 3 * out_c, m[11]); \ - MS_STQ_F32(dst_data + 3 * dst_step * out_c, m[12]); \ - MS_STQ_F32(dst_data + 3 * dst_step * out_c + out_c, m[13]); \ - MS_STQ_F32(dst_data + 3 * dst_step * out_c + 2 * out_c, m[14]); \ - MS_STQ_F32(dst_data + 3 * dst_step * out_c + 3 * out_c, m[15]); - -#define Store25Data \ - MS_STQ_F32(dst_data, m[0]); \ - MS_STQ_F32(dst_data + out_c, m[1]); \ - MS_STQ_F32(dst_data + 2 * out_c, m[2]); \ - MS_STQ_F32(dst_data + 3 * out_c, m[3]); \ - MS_STQ_F32(dst_data + 4 * out_c, m[4]); \ - MS_STQ_F32(dst_data + dst_step * out_c, m[5]); \ - MS_STQ_F32(dst_data + dst_step * out_c + out_c, m[6]); \ - MS_STQ_F32(dst_data + dst_step * out_c + 2 * out_c, m[7]); \ - MS_STQ_F32(dst_data + dst_step * out_c + 3 * out_c, m[8]); \ - MS_STQ_F32(dst_data + dst_step * out_c + 4 * out_c, m[9]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c, m[10]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c + out_c, m[11]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c + 2 * out_c, m[12]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c + 3 * out_c, m[13]); \ - MS_STQ_F32(dst_data + 2 * dst_step * out_c + 4 * out_c, m[14]); \ - MS_STQ_F32(dst_data + 3 * dst_step * out_c, m[15]); \ - MS_STQ_F32(dst_data + 3 * dst_step 
* out_c + out_c, m[16]); \ - MS_STQ_F32(dst_data + 3 * dst_step * out_c + 2 * out_c, m[17]); \ - MS_STQ_F32(dst_data + 3 * dst_step * out_c + 3 * out_c, m[18]); \ - MS_STQ_F32(dst_data + 3 * dst_step * out_c + 4 * out_c, m[19]); \ - MS_STQ_F32(dst_data + 4 * dst_step * out_c, m[20]); \ - MS_STQ_F32(dst_data + 4 * dst_step * out_c + out_c, m[21]); \ - MS_STQ_F32(dst_data + 4 * dst_step * out_c + 2 * out_c, m[22]); \ - MS_STQ_F32(dst_data + 4 * dst_step * out_c + 3 * out_c, m[23]); \ - MS_STQ_F32(dst_data + 4 * dst_step * out_c + 4 * out_c, m[24]); - -void OutputTransform4x2Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x2ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x2Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x3Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x3ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform4x3Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); - -void OutputTransform6x2Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x2ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x2Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int 
dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x3Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x3ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x3Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x4Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x4ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x4Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x5Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x5ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform6x5Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); - -void OutputTransform8x2Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x2ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x2Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int 
out_c, int r_w, int r_h, int r_c); -void OutputTransform8x3Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x3ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x3Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x4Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x4ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x4Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x5Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x5ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x5Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x6ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x6Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int 
r_h, int r_c); -void OutputTransform8x7Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, int dst_step, - int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x7ReluUnit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); -void OutputTransform8x7Relu6Unit(const float *src_data, float *dst_data, const float *bias_data, int src_step, - int dst_step, int out_c, int r_w, int r_h, int r_c); - -int SelectOutputUnit(ConvParameter *conv_param); - -bool CheckIfUseWinograd(int *output_unit, ConvParameter *conv_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_WINOGRAD_UTILS_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/activation_grad.h b/mindspore/lite/nnacl/fp32_grad/activation_grad.h deleted file mode 100644 index f6a3db303f..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/activation_grad.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_ACTIVATION_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_ACTIVATION_GRAD_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/fp32/arithmetic_fp32.h" -#include "nnacl/errorcode.h" - -typedef struct ActivationGradParameter { - OpParameter op_parameter; - int type_; - float alpha_; -} ActivationGradParameter; -#ifdef __cplusplus -extern "C" { -#endif - -int ReluGrad(float *src0, float *src1, size_t length, float *dst); -int Relu6Grad(float *src0, float *src1, size_t length, float *dst); -int LReluGrad(float *src0, float *src1, size_t length, float *dst, float alpha); -int SigmoidGrad(float *src0, float *src1, size_t length, float *dst); -int TanhGrad(float *src0, float *src1, size_t length, float *dst); -int HSwishGrad(float *src0, float *src1, size_t length, float *dst); -int HSigmoidGrad(float *src0, float *src1, size_t length, float *dst); -int EluGrad(float *src0, float *src1, size_t length, float *dst, float alpha); -int GeluGrad(float *src0, float *src1, size_t length, float *dst); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_ACTIVATION_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/arithmetic_grad.h b/mindspore/lite/nnacl/fp32_grad/arithmetic_grad.h deleted file mode 100644 index f4077634a7..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/arithmetic_grad.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_ARITHMETIC_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_ARITHMETIC_GRAD_H_ - -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -void ElementDivNegSquare(const float *nom, const float *denom, float *output, int element_size); -void ElementMulAndDivNegSquare(const float *a, const float *b, const float *denom, float *output, int element_size); -int ElementAbsGrad(const float *in1, const float *in2, float *out, int element_size); -void MaximumByAxes(const float *input0, const float *input1, const float *dy, const int *input0_dims, - const int *input1_dims, const int *dy_dims, float *output0, float *output1, int num_dims); -void MinimumByAxes(const float *input0, const float *input1, const float *dy, const int *input0_dims, - const int *input1_dims, const int *dy_dims, float *output0, float *output1, int num_dims); -int ElementSqrtGrad(const float *in1, const float *in2, float *out, const int element_size); -int ElementRsqrtGrad(const float *in1, const float *in2, float *out, const int element_size); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_ARITHMETIC_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/batch_norm.h b/mindspore/lite/nnacl/fp32_grad/batch_norm.h deleted file mode 100644 index 9931819696..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/batch_norm.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_BATCH_NORM_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_BATCH_NORM_H_ - -#include "nnacl/op_base.h" - -typedef struct BNGradParameter { - OpParameter op_parameter_; - float epsilon_; -} BNGradParameter; - -#ifdef __cplusplus -extern "C" { -#endif - -void var2Invar(float *save_var, int size, float eps); -void backwardAll(const float *in, const float *yt, const float *mean, const float *invar, const float *scale, int size, - int ch, float *dxhat_sum, float *dxhathat_sum, float *dbias, float *dscale, float *dx); -void backwardP1(const float *in, const float *yt, const float *mean, const float *invar, const float *scale, int size, - int ch, float *dxhat_sum, float *dxhathat_sum, float *dbias, float *dscale); -void backwardP2(const float *in, const float *yt, const float *mean, const float *invar, const float *scale, int size, - int total_size, int ch, const float *dxhat_sum, const float *dxhathat_sum, float *dx); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_BATCH_NORM_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/binary_cross_entropy.h b/mindspore/lite/nnacl/fp32_grad/binary_cross_entropy.h deleted file mode 100644 index dc921691cd..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/binary_cross_entropy.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_H_ -#define MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_H_ - -#include "nnacl/op_base.h" - -typedef struct BinaryCrossEntropyParameter { - OpParameter op_parameter_; - int reduction; -} BinaryCrossEntropyParameter; - -#ifdef __cplusplus -extern "C" { -#endif - -void BinaryCrossEntropy(const int input_size, const int reduction, const float *input_x, const float *input_y, - const float *weight, float *loss, float *tmp_loss); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/binary_cross_entropy_grad.h b/mindspore/lite/nnacl/fp32_grad/binary_cross_entropy_grad.h deleted file mode 100644 index f3c7c3952d..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/binary_cross_entropy_grad.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_GRAD_H_ -#define MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_GRAD_H_ - -#include "nnacl/op_base.h" - -typedef struct BinaryCrossEntropyGradParameter { - OpParameter op_parameter_; - int reduction; -} BinaryCrossEntropyGradParameter; - -#ifdef __cplusplus -extern "C" { -#endif - -int BinaryCrossEntropyGrad(const int input_size, const int reduction, const float *input_x, const float *input_y, - const float *weight, const float *dloss, float *dx); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/convolution_grad_filter.h b/mindspore/lite/nnacl/fp32_grad/convolution_grad_filter.h deleted file mode 100644 index 0209f94285..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/convolution_grad_filter.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_ - -#include -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ConvDwFilterGrad(const float *x, const float *dy, float *dw, int start, int count, const ConvParameter *conv_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/dropout_grad.h b/mindspore/lite/nnacl/fp32_grad/dropout_grad.h deleted file mode 100644 index 1124eb29f6..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/dropout_grad.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_DROPOUT_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_DROPOUT_GRAD_H_ - -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void DropoutGrad(const float *yt_ptr, const float *mask, float *output_ptr, int length, float ratio); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_DROPOUT_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/dropout_parameter.h b/mindspore/lite/nnacl/fp32_grad/dropout_parameter.h deleted file mode 100644 index 789254abae..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/dropout_parameter.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_DROPOUT_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_DROPOUT_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct DropoutParameter { - OpParameter op_parameter_; - float ratio_; -} DropoutParameter; - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_DROPOUT_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/gemm.h b/mindspore/lite/nnacl/fp32_grad/gemm.h deleted file mode 100644 index 91ae794584..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/gemm.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_GEMM_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_GEMM_H_ - -#include -#include "nnacl/op_base.h" -#ifdef __cplusplus -extern "C" { -#endif -typedef struct { - int ca; - int cb; - ActType atype; - float *bias; - float *mat_a; - float *mat_b; -} GemmCb; - -void GemmMatmulPlus(int ta, int tb, int M, int N, int K, float alpha, const float *mat_a, int lda, const float *mat_b, - int ldb, float beta, float *mat_c, int ldc, float *workspace, GemmCb *cb); -void GemmMatmul(int ta, int tb, int M, int N, int K, float alpha, const float *mat_a, int lda, const float *mat_b, - int ldb, float beta, float *mat_c, int ldc, float *workspace); -int MatSize(int row, int col, int round); -int MatSizeTotal(int row, int col, int deep, int inc); -void AddMatrix(const float *v1, float *v2, float beta, int row, int col, int stride); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_GEMM_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/layernorm_grad.h b/mindspore/lite/nnacl/fp32_grad/layernorm_grad.h deleted file mode 100644 index 3016fced41..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/layernorm_grad.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_LAYERNORM_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_LAYERNORM_GRAD_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -void LayerNormGrad(const float *x, const float *dy, const float *var, const float *mean, const float *gamma, - int param_num, int param_size, int block_num, int block_size, float *dx, float *dg, float *db); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_LAYERNORM_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/layernormgrad_parameter.h b/mindspore/lite/nnacl/fp32_grad/layernormgrad_parameter.h deleted file mode 100644 index fcf35e95bf..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/layernormgrad_parameter.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_LAYERNORMGRAD_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_LAYERNORMGRAD_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct LayerNormGradParameter { - OpParameter op_parameter_; - int begin_norm_axis_; - int begin_params_axis_; -} LayerNormGradParameter; - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_LAYERNORMGRAD_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/optimizer.h b/mindspore/lite/nnacl/fp32_grad/optimizer.h deleted file mode 100644 index 0cc52ced60..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/optimizer.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_OPTIMIZER_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_OPTIMIZER_H_ - -#include "nnacl/op_base.h" - -typedef struct ApplyMomentumParameter { - OpParameter op_parameter_; - bool use_nesterov_; - float grad_scale_; -} ApplyMomentumParameter; - -typedef struct SgdParameter { - OpParameter op_parameter_; - float dampening_; - bool use_nesterov_; - float weight_decay_; -} SgdParameter; - -typedef struct AdamParameter { - OpParameter op_parameter_; - bool use_nesterov_; -} AdamParameter; - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_OPTIMIZER_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/pack_ext.h b/mindspore/lite/nnacl/fp32_grad/pack_ext.h deleted file mode 100644 index ca8b67336d..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/pack_ext.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_PACK_EXT_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_PACK_EXT_H_ - -#include -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void RollingIm2ColPackUnitFp32(const float *input_data, const ConvParameter *conv_param, float *packed_input, - int real_cal_num, int block_index); -void RollingIm2ColPackDwUnitFp32(const float *input_data, const ConvParameter *conv_param, float *packed_input, - int real_cal_num, int block_index); - -void rolling_im2col_hwc(const float *in_data, float *data_col, const ConvParameter *conv_param, int rows, int start); -void rolling_im2row_hwc(const float *in_data, float *data_row, const ConvParameter *conv_param, int rows, int start); -void rolling_col2im_hwc(const float *data_col, float *data_im, const ConvParameter *conv_param, int rows, int start); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_PACK_EXT_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/pooling_grad.h b/mindspore/lite/nnacl/fp32_grad/pooling_grad.h deleted file mode 100644 index 1bfd684ad4..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/pooling_grad.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_POOLING_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_POOLING_GRAD_H_ - -#include "nnacl/fp32/pooling_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif -void AvgPoolingGrad(const float *input_ptr, float *output_ptr, int count, PoolingParameter *pooling_param); -void MaxPoolingGrad(const float *input_ptr, const float *dy_ptr, float *output_ptr, int output_batch, - PoolingParameter *pooling_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_POOLING_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/reduce_grad.h b/mindspore/lite/nnacl/fp32_grad/reduce_grad.h deleted file mode 100644 index 3f461d175f..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/reduce_grad.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_REDUCE_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_REDUCE_GRAD_H_ - -#include - -#ifdef __cplusplus -extern "C" { -#endif -float ReduceMeanAll(const float *src, int size); -void ReduceSumByAxes(const float *input, const int *input_dims, float *output, const int *output_dims, int num_dims); -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_REDUCE_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/resize_grad.h b/mindspore/lite/nnacl/fp32_grad/resize_grad.h deleted file mode 100644 index 8d7630810e..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/resize_grad.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_RESIZE_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_RESIZE_GRAD_H_ - -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct ResizeGradParameter { - OpParameter op_parameter_; - bool align_corners_; - int method; - size_t in_height_; - size_t in_width_; - size_t out_height_; - size_t out_width_; - float height_scale_; - float width_scale_; -} ResizeGradParameter; - -void ResizeNearestNeighborGrad(float *in_addr, float *out_addr, int batch_size, int channel, - ResizeGradParameter *param); -void ResizeBiLinearGrad(float *in_addr, float *out_addr, int batch_size, int channel, ResizeGradParameter *param); -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_RESIZE_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/smooth_l1_loss.h b/mindspore/lite/nnacl/fp32_grad/smooth_l1_loss.h deleted file mode 100644 index bc874768cc..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/smooth_l1_loss.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_SMOOTH_L1_LOSS_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_FP32_SMOOTH_L1_LOSS_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct SmoothL1LossParameter { - OpParameter op_parameter_; - float beta_; -} SmoothL1LossParameter; - -#endif // MINDSPORE_LITE_NNACL_FP32_SMOOTH_L1_LOSS_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/softmax_grad.h b/mindspore/lite/nnacl/fp32_grad/softmax_grad.h deleted file mode 100644 index d70a70730d..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/softmax_grad.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_SOFTMAX_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_SOFTMAX_GRAD_H_ - -#include "nnacl/op_base.h" -#include "nnacl/fp32/softmax_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct SoftmaxCrossEntropyParameter { - // primitive parameter - OpParameter op_parameter_; - int n_dim_; - - // shape correlative - int input_shape_[5]; - - // other parameter - int32_t batch_size_; - unsigned int number_of_classes_; - bool is_grad_; -} SoftmaxCrossEntropyParameter; - -void SoftmaxGrad(const float *input_ptr, const float *yt_ptr, float *output_ptr, float *sum_data, float *sum_mul, - SoftmaxParameter *parameter); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_SOFTMAX_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/strided_slice_grad.h b/mindspore/lite/nnacl/fp32_grad/strided_slice_grad.h deleted file mode 100644 index 5ed2a68d76..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/strided_slice_grad.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_STRIDED_SLICE_GRAD_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_STRIDED_SLICE_GRAD_H_ - -#include "nnacl/op_base.h" -#include "nnacl/strided_slice_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int DoStridedSliceGrad(const float *inputs, float *output, const int *dx_shape, StridedSliceParameter *param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_STRIDED_SLICE_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/unsorted_segment_sum.h b/mindspore/lite/nnacl/fp32_grad/unsorted_segment_sum.h deleted file mode 100644 index c0b891dc74..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/unsorted_segment_sum.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_UNSORTED_SEGMENT_SUM_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_UNSORTED_SEGMENT_SUM_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -int UnsortedSegmentSum(const float *input, int unit_num, int input_dim1, const int *indices, float *output, - int output_dim0, int output_dim1); -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_UNSORTED_SEGMENT_SUM_H_ diff --git a/mindspore/lite/nnacl/fp32_grad/utils.h b/mindspore/lite/nnacl/fp32_grad/utils.h deleted file mode 100644 index f7895aa917..0000000000 --- a/mindspore/lite/nnacl/fp32_grad/utils.h +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FP32_GRAD_UTILS_H_ -#define MINDSPORE_LITE_NNACL_FP32_GRAD_UTILS_H_ - -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -static inline size_t GetInputOffset(int num_dims, const int *dims, const int *iter) { - size_t offset = 0; - for (int idx = 0; idx < num_dims; ++idx) { - offset = offset * (size_t)(dims[idx]) + (size_t)(iter[idx]); - } - - return offset; -} - -static inline size_t GetOutputOffset(int num_dims, const int *dims, const int *iter, int num_axis, const int *axes) { - size_t offset = 0; - for (int idx = 0; idx < num_dims; ++idx) { - // if we need to skip this axis - int is_axis = 0; - for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) { - if (idx == axes[axis_idx]) { - is_axis = 1; - break; - } - } - - if (is_axis == 0) { - offset = offset * (size_t)(dims[idx]) + (size_t)(iter[idx]); - } - } - return offset; -} - -static inline int NextIndex(int num_dims, const int *dims, int *current) { - int carry = 1; - for (int idx = num_dims - 1; idx >= 0; --idx) { - int current_val = current[idx] + carry; - if (dims[idx] == current_val) { - current[idx] = 0; - } else { - current[idx] = current_val; - carry = 0; - break; - } - } - return (carry == 0); -} - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_FP32_GRAD_UTILS_H_ diff --git a/mindspore/lite/nnacl/gelu_parameter.h b/mindspore/lite/nnacl/gelu_parameter.h deleted file mode 100644 index 7a16900665..0000000000 --- a/mindspore/lite/nnacl/gelu_parameter.h +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_GELU_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_GELU_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct GeLUParameter { - // Primitive parameter - OpParameter op_parameter_; - bool approximate_; -} GeLUParameter; - -#endif // MINDSPORE_LITE_NNACL_GELU_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/gru_parameter.h b/mindspore/lite/nnacl/gru_parameter.h deleted file mode 100644 index fdea2c2986..0000000000 --- a/mindspore/lite/nnacl/gru_parameter.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_GRU_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_GRU_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct GruParameter { - // Primitive parameter - OpParameter op_parameter_; - // shape correlative - int input_size_; - int hidden_size_; // output_size - int seq_len_; - int batch_; - // other parameter - int output_step_; - bool bidirectional_; - int input_row_align_; - int input_col_align_; - int state_row_align_; - int state_col_align_; -} GruParameter; - -#endif // MINDSPORE_LITE_NNACL_GRU_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/infer/adam_infer.h b/mindspore/lite/nnacl/infer/adam_infer.h deleted file mode 100644 index f4ec666813..0000000000 --- a/mindspore/lite/nnacl/infer/adam_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ADAM_INFER_H -#define MINDSPORE_LITE_NNACL_ADAM_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int AdamInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ADAM_INFER_H diff --git a/mindspore/lite/nnacl/infer/add_sub_grad_infer.h b/mindspore/lite/nnacl/infer/add_sub_grad_infer.h deleted file mode 100644 index 4d3b959b42..0000000000 --- a/mindspore/lite/nnacl/infer/add_sub_grad_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ADD_SUB_GRAD_INFER_H -#define MINDSPORE_LITE_NNACL_ADD_SUB_GRAD_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int AddSubGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ADD_SUB_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/addn_infer.h b/mindspore/lite/nnacl/infer/addn_infer.h deleted file mode 100644 index 76f34944e8..0000000000 --- a/mindspore/lite/nnacl/infer/addn_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ADDN_INFER_H -#define MINDSPORE_LITE_NNACL_ADDN_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int AddnInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ADDN_INFER_H diff --git a/mindspore/lite/nnacl/infer/apply_momentum_infer.h b/mindspore/lite/nnacl/infer/apply_momentum_infer.h deleted file mode 100644 index a377b3a5e0..0000000000 --- a/mindspore/lite/nnacl/infer/apply_momentum_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_APPLY_MOMENTUM_INFER_H -#define MINDSPORE_LITE_NNACL_APPLY_MOMENTUM_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ApplyMomentumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_APPLY_MOMENTUM_INFER_H diff --git a/mindspore/lite/nnacl/infer/argmin_max_infer.h b/mindspore/lite/nnacl/infer/argmin_max_infer.h deleted file mode 100644 index 42726b3c57..0000000000 --- a/mindspore/lite/nnacl/infer/argmin_max_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ARGMAX_INFER_H -#define MINDSPORE_LITE_NNACL_ARGMAX_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/arg_min_max_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ArgMinMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ARGMAX_INFER_H diff --git a/mindspore/lite/nnacl/infer/arithmetic_compare_infer.h b/mindspore/lite/nnacl/infer/arithmetic_compare_infer.h deleted file mode 100644 index 2934cdce95..0000000000 --- a/mindspore/lite/nnacl/infer/arithmetic_compare_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_INFER_H -#define MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_INFER_H - -#include "nnacl/infer/arithmetic_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ArithmeticCompareInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_INFER_H diff --git a/mindspore/lite/nnacl/infer/arithmetic_grad_infer.h b/mindspore/lite/nnacl/infer/arithmetic_grad_infer.h deleted file mode 100644 index bdb1dbfbf1..0000000000 --- a/mindspore/lite/nnacl/infer/arithmetic_grad_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INFER_ARITHMETIC_GRAD_INFER_H_ -#define MINDSPORE_LITE_NNACL_INFER_ARITHMETIC_GRAD_INFER_H_ - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INFER_ARITHMETIC_GRAD_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/arithmetic_infer.h b/mindspore/lite/nnacl/infer/arithmetic_infer.h deleted file mode 100644 index c7ee565643..0000000000 --- a/mindspore/lite/nnacl/infer/arithmetic_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_INFER_H -#define MINDSPORE_LITE_NNACL_ARITHMETIC_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/arithmetic.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ArithmeticInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outpus_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_INFER_H diff --git a/mindspore/lite/nnacl/infer/assert_op_infer.h b/mindspore/lite/nnacl/infer/assert_op_infer.h deleted file mode 100644 index 4e03466f11..0000000000 --- a/mindspore/lite/nnacl/infer/assert_op_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ASSERT_OP_INFER_H -#define MINDSPORE_LITE_NNACL_ASSERT_OP_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int AssertOpInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ASSERT_OP_INFER_H diff --git a/mindspore/lite/nnacl/infer/assign_add_infer.h b/mindspore/lite/nnacl/infer/assign_add_infer.h deleted file mode 100644 index 0290e88b57..0000000000 --- a/mindspore/lite/nnacl/infer/assign_add_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ASSIGN_ADD_INFER_H -#define MINDSPORE_LITE_NNACL_ASSIGN_ADD_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int AssignAddInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ASSIGN_ADD_INFER_H diff --git a/mindspore/lite/nnacl/infer/assign_infer.h b/mindspore/lite/nnacl/infer/assign_infer.h deleted file mode 100644 index fe276b79e3..0000000000 --- a/mindspore/lite/nnacl/infer/assign_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ASSIGN_INFER_H -#define MINDSPORE_LITE_NNACL_ASSIGN_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int AssignInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ASSIGN_INFER_H diff --git a/mindspore/lite/nnacl/infer/audio_spectrogram_infer.h b/mindspore/lite/nnacl/infer/audio_spectrogram_infer.h deleted file mode 100644 index 030883c8b6..0000000000 --- a/mindspore/lite/nnacl/infer/audio_spectrogram_infer.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_AUDIO_SPECTROGRAM_INFER_H -#define MINDSPORE_LITE_NNACL_AUDIO_SPECTROGRAM_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct AudioSpectrogramParameter { - OpParameter op_parameter_; - int window_size_; - int stride_; -} AudioSpectrogramParameter; - -int AudioSpectrogramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_AUDIO_SPECTROGRAM_INFER_H diff --git a/mindspore/lite/nnacl/infer/batch_to_space_infer.h b/mindspore/lite/nnacl/infer/batch_to_space_infer.h deleted file mode 100644 index 261a1f76bf..0000000000 --- a/mindspore/lite/nnacl/infer/batch_to_space_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_INFER_H -#define MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/batch_to_space.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int BatchToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_INFER_H diff --git a/mindspore/lite/nnacl/infer/bias_grad_infer.h b/mindspore/lite/nnacl/infer/bias_grad_infer.h deleted file mode 100644 index 2b40694d09..0000000000 --- a/mindspore/lite/nnacl/infer/bias_grad_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_BIAS_GRAD_INFER_H -#define MINDSPORE_LITE_NNACL_BIAS_GRAD_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int BiasGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_BIAS_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h b/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h deleted file mode 100644 index 6727303255..0000000000 --- a/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_INFER_H -#define MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32_grad/binary_cross_entropy.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int BinaryCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_INFER_H diff --git a/mindspore/lite/nnacl/infer/bn_grad_infer.h b/mindspore/lite/nnacl/infer/bn_grad_infer.h deleted file mode 100644 index a28f5b2f55..0000000000 --- a/mindspore/lite/nnacl/infer/bn_grad_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_BN_GRAD_INFER_H -#define MINDSPORE_LITE_NNACL_BN_GRAD_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int BnGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_BN_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/broadcast_to_infer.h b/mindspore/lite/nnacl/infer/broadcast_to_infer.h deleted file mode 100644 index a7b8630a7a..0000000000 --- a/mindspore/lite/nnacl/infer/broadcast_to_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_BROADCAST_TO_INFER_H -#define MINDSPORE_LITE_NNACL_BROADCAST_TO_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32/broadcast_to_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int BroadcastToInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outpus_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_BROADCAST_TO_INFER_H diff --git a/mindspore/lite/nnacl/infer/cast_infer.h b/mindspore/lite/nnacl/infer/cast_infer.h deleted file mode 100644 index 6c669c7ca0..0000000000 --- a/mindspore/lite/nnacl/infer/cast_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CAST_INFER_H -#define MINDSPORE_LITE_NNACL_CAST_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int CastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CAST_INFER_H diff --git a/mindspore/lite/nnacl/infer/common_infer.h b/mindspore/lite/nnacl/infer/common_infer.h deleted file mode 100644 index b7489a89b6..0000000000 --- a/mindspore/lite/nnacl/infer/common_infer.h +++ /dev/null @@ -1,212 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_COMMON_H_ -#define MINDSPORE_LITE_NNACL_COMMON_H_ - -#include -#include "nnacl/errorcode.h" -#include "nnacl/op_base.h" -#include "nnacl/tensor_c.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define kNCHW_N 0 -#define kNCHW_C 1 -#define kNCHW_H 2 -#define kNCHW_W 3 - -typedef enum FormatC { - Format_NCHW = 0, - Format_NHWC = 1, - Format_NHWC4 = 2, - Format_HWKC = 3, - Format_HWCK = 4, - Format_KCHW = 5, - Format_CKHW = 6, - Format_KHWC = 7, - Format_CHWK = 8, - Format_HW = 9, - Format_HW4 = 10, - Format_NC = 11, - Format_NC4 = 12, - Format_NC4HW4 = 13, - Format_NUM_OF_FORMAT = 14, - Format_MIN = Format_NCHW, - Format_MAX = Format_NUM_OF_FORMAT -} FormatC; - -typedef enum TypeIdC { - kTypeUnknown = 0, - kMetaTypeBegin = kTypeUnknown, - kMetaTypeType, // Type - kMetaTypeAnything, - kMetaTypeObject, - kMetaTypeTypeType, // TypeType - kMetaTypeProblem, - kMetaTypeExternal, - kMetaTypeNone, - kMetaTypeNull, - kMetaTypeEllipsis, - kMetaTypeEnd, - // - // Object types - // - kObjectTypeBegin = kMetaTypeEnd, - kObjectTypeNumber, - kObjectTypeString, - kObjectTypeList, - kObjectTypeTuple, - kObjectTypeSlice, - kObjectTypeKeyword, - kObjectTypeTensorType, - kObjectTypeRowTensorType, - kObjectTypeSparseTensorType, - kObjectTypeUndeterminedType, - kObjectTypeClass, - kObjectTypeDictionary, - kObjectTypeFunction, - kObjectTypeJTagged, - kObjectTypeSymbolicKeyType, - kObjectTypeEnvType, - kObjectTypeRefKey, - kObjectTypeRef, - kObjectTypeEnd, - // - // Number Types - // - kNumberTypeBegin = kObjectTypeEnd, - kNumberTypeBool, - kNumberTypeInt, - kNumberTypeInt8, - kNumberTypeInt16, - kNumberTypeInt32, - kNumberTypeInt64, - kNumberTypeUInt, - kNumberTypeUInt8, - kNumberTypeUInt16, - kNumberTypeUInt32, - kNumberTypeUInt64, - kNumberTypeFloat, - kNumberTypeFloat16, - kNumberTypeFloat32, - kNumberTypeFloat64, - kNumberTypeComplex64, - kNumberTypeEnd -} TypeIdC; - -enum NNACLLshProjectionType { - LshProjectionType_UNKNOWN = 0, - 
LshProjectionType_SPARSE = 1, - LshProjectionType_DENSE = 2, - LshProjectionType_MIN = LshProjectionType_UNKNOWN, - LshProjectionType_MAX = LshProjectionType_DENSE -}; - -enum NNACLQuantType { - QuantType_QUANT_NONE = 0, - QuantType_AwareTraining = 1, - QuantType_WeightQuant = 2, - QuantType_PostTraining = 3, - QuantType_QUANT_WEIGHT = 4, - QuantType_QUANT_ALL = 5, - QuantType_MIN = QuantType_QUANT_NONE, - QuantType_MAX = QuantType_QUANT_ALL -}; - -typedef struct vvector { - int **shape_; // value of shapes - int *shape_size_; // size of shape - size_t size_; // number of shapes -} vvector; - -typedef struct TensorListC { - bool is_ready_; - int data_type_; - int format_; - - int tensors_data_type_; // element_data_type_, keep same as c++ - int max_elements_num_; - int element_shape_[8]; - size_t element_num_; - size_t element_shape_size_; - TensorC *tensors_; -} TensorListC; - -typedef struct VectorC { - int *data_; - size_t size_; - size_t max_size_; - size_t per_malloc_size_; -} VectorC; - -int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, vvector *tensor_shape); -int TensorListMergeShape(int *element_shape, size_t *element_shape_size, const int *tmp, size_t tmp_size); -bool TensorListIsFullyDefined(int *shape, size_t shape_size); - -int GetBatch(const TensorC *tensor); -int GetHeight(const TensorC *tensor); -int GetWidth(const TensorC *tensor); -int GetChannel(const TensorC *tensor); -int GetElementNum(const TensorC *tensor); -int GetDimensionSize(const TensorC *tensor, const size_t index); - -int CheckAugmentNull(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); -int CheckAugmentNullSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter, size_t inputs_size_obj, size_t outputs_size_obj); -int CheckAugmentNullSizeInputTwo(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, 
OpParameter *parameter, size_t inputs_size_obj_0, - size_t inputs_size_obj_1, size_t outputs_size_obj); -int CheckAugmentNullInputSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter, size_t inputs_size_obj); -int CheckAugmentNullOutputSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter, size_t outputs_size_obj); -void SetDataTypeFormat(TensorC *dst, const TensorC *src); - -int SetShapeTensor(TensorC *dst, const TensorC *src); -int SetShapeArray(TensorC *dst, int *src, size_t src_size); -int ShapeSet(int *dst_shape, size_t *dst_shape_size, const int *src_shape, size_t src_shape_size); -int ShapePush(int *shape, size_t *shape_size, int value); -int ShapeInsert(int *shape, size_t *shape_size, int index, int value); -int ShapeErase(int *shape, size_t *shape_size, int index); -bool ShapeEqual(const int *shape0, size_t shape0_size, const int *shape1, size_t shape1_size); - -void iswap(int *a, int *b); - -int imin(int a, int b); -int imax(int a, int b); - -int CommonInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); -int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -int VectorCInit(VectorC *vc, size_t per_malloc_size); -void VectorCSet(VectorC *vc, const int *src_shape, size_t src_shape_size); -void VectorCPush(VectorC *vc, int value); -void VectorCInsert(VectorC *vc, int index, int value); -void VectorCErase(VectorC *vc, int index); -bool VectorCEqual(VectorC *vc1, VectorC *vc2); -void VectorCFree(VectorC *vc); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_COMMON__H_ diff --git a/mindspore/lite/nnacl/infer/concat_infer.h b/mindspore/lite/nnacl/infer/concat_infer.h deleted file mode 100644 index 08f3b8ff78..0000000000 --- 
a/mindspore/lite/nnacl/infer/concat_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_CONCAT_INFER_H -#define MINDSPORE_LITE_NNACL_CONCAT_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/concat_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ConcatInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CONCAT_INFER_H diff --git a/mindspore/lite/nnacl/infer/constant_of_shape_infer.h b/mindspore/lite/nnacl/infer/constant_of_shape_infer.h deleted file mode 100644 index 4fb53ec4d3..0000000000 --- a/mindspore/lite/nnacl/infer/constant_of_shape_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_INFER_H -#define MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/constant_of_shape_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ConstantOfShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_INFER_H diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h b/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h deleted file mode 100644 index 2fa82c41de..0000000000 --- a/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CONV2D_GRAD_FILTER_INFER_H -#define MINDSPORE_LITE_NNACL_CONV2D_GRAD_FILTER_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int Conv2dGradFilterInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CONV2D_GRAD_FILTER_INFER_H diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h b/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h deleted file mode 100644 index 4ea80be53f..0000000000 --- a/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CONV2D_GRAD_INPUT_INFER_H -#define MINDSPORE_LITE_NNACL_CONV2D_GRAD_INPUT_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int Conv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CONV2D_GRAD_INPUT_INFER_H diff --git a/mindspore/lite/nnacl/infer/conv2d_infer.h b/mindspore/lite/nnacl/infer/conv2d_infer.h deleted file mode 100644 index ee0d291b6a..0000000000 --- a/mindspore/lite/nnacl/infer/conv2d_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CONV2D_INFER_H -#define MINDSPORE_LITE_NNACL_CONV2D_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int Conv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/crop_and_resize_infer.h b/mindspore/lite/nnacl/infer/crop_and_resize_infer.h deleted file mode 100644 index 0d0858839a..0000000000 --- a/mindspore/lite/nnacl/infer/crop_and_resize_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CROP_AND_RESIZE_INFER_H -#define MINDSPORE_LITE_NNACL_CROP_AND_RESIZE_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int CropAndResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CROP_AND_RESIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/crop_infer.h b/mindspore/lite/nnacl/infer/crop_infer.h deleted file mode 100644 index dd6de645f3..0000000000 --- a/mindspore/lite/nnacl/infer/crop_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CROP_INFER_H -#define MINDSPORE_LITE_NNACL_CROP_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/crop_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int CropInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CROP_INFER_H diff --git a/mindspore/lite/nnacl/infer/custom_extract_features_infer.h b/mindspore/lite/nnacl/infer/custom_extract_features_infer.h deleted file mode 100644 index af518e60ce..0000000000 --- a/mindspore/lite/nnacl/infer/custom_extract_features_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CUSTOM_EXTRACT_FEATURES_INFER_H -#define MINDSPORE_LITE_NNACL_CUSTOM_EXTRACT_FEATURES_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int CustomExtractFeaturesInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CUSTOM_EXTRACT_FEATURES_INFER_H diff --git a/mindspore/lite/nnacl/infer/custom_normalize_infer.h b/mindspore/lite/nnacl/infer/custom_normalize_infer.h deleted file mode 100644 index 6fe40cfc51..0000000000 --- a/mindspore/lite/nnacl/infer/custom_normalize_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CUSTOM_NORMALIZE_INFER_H -#define MINDSPORE_LITE_NNACL_CUSTOM_NORMALIZE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/softmax_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int CustomNormalizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CUSTOM_NORMALIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/custom_predict_infer.h b/mindspore/lite/nnacl/infer/custom_predict_infer.h deleted file mode 100644 index 4df7628e5e..0000000000 --- a/mindspore/lite/nnacl/infer/custom_predict_infer.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_CUSTOM_PREDICT_INFER_H -#define MINDSPORE_LITE_NNACL_CUSTOM_PREDICT_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct CustomPredictParameter { - OpParameter op_parameter_; - int output_num; -} CustomPredictParameter; - -int CustomPredictInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_CUSTOM_PREDICT_INFER_H diff --git a/mindspore/lite/nnacl/infer/deconv2d_infer.h b/mindspore/lite/nnacl/infer/deconv2d_infer.h deleted file mode 100644 index 0563a9c6e9..0000000000 --- a/mindspore/lite/nnacl/infer/deconv2d_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_DECONV2D_INFER_H -#define MINDSPORE_LITE_NNACL_DECONV2D_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int Deconv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_DECONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h b/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h deleted file mode 100644 index 59f295e141..0000000000 --- a/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_DEDEPTHWISE_CONV2D_INFER_H -#define MINDSPORE_LITE_NNACL_DEDEPTHWISE_CONV2D_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int DeDepthwiseConv2DInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_DEDEPTHWISE_CONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/depth_to_space_infer.h b/mindspore/lite/nnacl/infer/depth_to_space_infer.h deleted file mode 100644 index be114f56e8..0000000000 --- a/mindspore/lite/nnacl/infer/depth_to_space_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_DEPTHTOSPACE_INFER_H -#define MINDSPORE_LITE_NNACL_DEPTHTOSPACE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/depth_to_space_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int DepthToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_DEPTHTOSPACE_INFER_H diff --git a/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h b/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h deleted file mode 100644 index 799279a1c7..0000000000 --- a/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_DEPTHWISE_CONV2D_INFER_H -#define MINDSPORE_LITE_NNACL_DEPTHWISE_CONV2D_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int DepthwiseConv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_DEPTHWISE_CONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/detection_post_process_infer.h b/mindspore/lite/nnacl/infer/detection_post_process_infer.h deleted file mode 100644 index f5ac10500f..0000000000 --- a/mindspore/lite/nnacl/infer/detection_post_process_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_INFER_H -#define MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/detection_post_process_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int DetectionPostProcessInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_INFER_H diff --git a/mindspore/lite/nnacl/infer/dropout_grad_infer.h b/mindspore/lite/nnacl/infer/dropout_grad_infer.h deleted file mode 100644 index b88bfe11da..0000000000 --- a/mindspore/lite/nnacl/infer/dropout_grad_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_DROPOUT_GRAD_INFER_H -#define MINDSPORE_LITE_NNACL_DROPOUT_GRAD_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int DropoutGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_DROPOUT_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/dropout_infer.h b/mindspore/lite/nnacl/infer/dropout_infer.h deleted file mode 100644 index 9e13f939c4..0000000000 --- a/mindspore/lite/nnacl/infer/dropout_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_DROPOUT_INFER_H -#define MINDSPORE_LITE_NNACL_DROPOUT_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int DropoutInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_DROPOUT_INFER_H diff --git a/mindspore/lite/nnacl/infer/embedding_lookup_infer.h b/mindspore/lite/nnacl/infer/embedding_lookup_infer.h deleted file mode 100644 index 642cf2e65a..0000000000 --- a/mindspore/lite/nnacl/infer/embedding_lookup_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_EMBEDDING_LOOKUP_INFER_H -#define MINDSPORE_LITE_NNACL_EMBEDDING_LOOKUP_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int EmbeddingLookupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_EMBEDDING_LOOKUP_INFER_H diff --git a/mindspore/lite/nnacl/infer/expand_dims_infer.h b/mindspore/lite/nnacl/infer/expand_dims_infer.h deleted file mode 100644 index 9005d75d13..0000000000 --- a/mindspore/lite/nnacl/infer/expand_dims_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_EXPAND_DIMS_INFER_H -#define MINDSPORE_LITE_NNACL_EXPAND_DIMS_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ExpandDimsInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_EXPAND_DIMS_INFER_H diff --git a/mindspore/lite/nnacl/infer/fft_imag_infer.h b/mindspore/lite/nnacl/infer/fft_imag_infer.h deleted file mode 100644 index df816e6397..0000000000 --- a/mindspore/lite/nnacl/infer/fft_imag_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FFT_IMAG_INFER_H -#define MINDSPORE_LITE_NNACL_FFT_IMAG_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int FftImagInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FFT_IMAG_INFER_H diff --git a/mindspore/lite/nnacl/infer/fft_real_infer.h b/mindspore/lite/nnacl/infer/fft_real_infer.h deleted file mode 100644 index b3410ead4d..0000000000 --- a/mindspore/lite/nnacl/infer/fft_real_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FFT_REAL_INFER_H -#define MINDSPORE_LITE_NNACL_FFT_REAL_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int FftRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FFT_REAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/fill_infer.h b/mindspore/lite/nnacl/infer/fill_infer.h deleted file mode 100644 index 535a7d84a3..0000000000 --- a/mindspore/lite/nnacl/infer/fill_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FILL_INFER_H -#define MINDSPORE_LITE_NNACL_FILL_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int FillInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FILL_INFER_H diff --git a/mindspore/lite/nnacl/infer/flatten_grad_infer.h b/mindspore/lite/nnacl/infer/flatten_grad_infer.h deleted file mode 100644 index 532ebe591d..0000000000 --- a/mindspore/lite/nnacl/infer/flatten_grad_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FLATTEN_GRAD_INFER_INFER_H -#define MINDSPORE_LITE_NNACL_FLATTEN_GRAD_INFER_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int FlattenGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FLATTEN_GRAD_INFER_INFER_H diff --git a/mindspore/lite/nnacl/infer/flatten_infer.h b/mindspore/lite/nnacl/infer/flatten_infer.h deleted file mode 100644 index f71e25829d..0000000000 --- a/mindspore/lite/nnacl/infer/flatten_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FLATTEN_INFER_H -#define MINDSPORE_LITE_NNACL_FLATTEN_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int FlattenInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FLATTEN_INFER_H diff --git a/mindspore/lite/nnacl/infer/full_connection_infer.h b/mindspore/lite/nnacl/infer/full_connection_infer.h deleted file mode 100644 index dc3ef3cfa8..0000000000 --- a/mindspore/lite/nnacl/infer/full_connection_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FULL_CONNECTION_INFER_H -#define MINDSPORE_LITE_NNACL_FULL_CONNECTION_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/matmul_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int FullConnectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FULL_CONNECTION_INFER_H diff --git a/mindspore/lite/nnacl/infer/fused_batchnorm_infer.h b/mindspore/lite/nnacl/infer/fused_batchnorm_infer.h deleted file mode 100644 index a90de7f459..0000000000 --- a/mindspore/lite/nnacl/infer/fused_batchnorm_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_FUSED_BATCHNORM_INFER_H -#define MINDSPORE_LITE_NNACL_FUSED_BATCHNORM_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int FusedBatchNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_FUSED_BATCHNORM_INFER_H diff --git a/mindspore/lite/nnacl/infer/gather_infer.h b/mindspore/lite/nnacl/infer/gather_infer.h deleted file mode 100644 index b83028addb..0000000000 --- a/mindspore/lite/nnacl/infer/gather_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_GATHER_INFER_H -#define MINDSPORE_LITE_NNACL_GATHER_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/gather_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int GatherInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_GATHER_INFER_H diff --git a/mindspore/lite/nnacl/infer/gather_nd_infer.h b/mindspore/lite/nnacl/infer/gather_nd_infer.h deleted file mode 100644 index 69c804f1d0..0000000000 --- a/mindspore/lite/nnacl/infer/gather_nd_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_GATHER_ND_INFER_H -#define MINDSPORE_LITE_NNACL_GATHER_ND_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32/gatherNd_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int GatherNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_GATHER_ND_INFER_H diff --git a/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h b/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h deleted file mode 100644 index 672924a092..0000000000 --- a/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_GROUP_CONV2D_GRAD_INPUT_INFER_H -#define MINDSPORE_LITE_NNACL_GROUP_CONV2D_GRAD_INPUT_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int GroupConv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_GROUP_CONV2D_GRAD_INPUT_INFER_H diff --git a/mindspore/lite/nnacl/infer/gru_infer.h b/mindspore/lite/nnacl/infer/gru_infer.h deleted file mode 100644 index 448c49ca94..0000000000 --- a/mindspore/lite/nnacl/infer/gru_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_GRU_INFER_H -#define MINDSPORE_LITE_NNACL_GRU_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/gru_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int GruInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_GRU_INFER_H diff --git a/mindspore/lite/nnacl/infer/hashtable_lookup_infer.h b/mindspore/lite/nnacl/infer/hashtable_lookup_infer.h deleted file mode 100644 index 304e97a3e2..0000000000 --- a/mindspore/lite/nnacl/infer/hashtable_lookup_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_HASHTABLE_LOOKUP_INFER_H -#define MINDSPORE_LITE_NNACL_HASHTABLE_LOOKUP_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int HashtableLoopupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_HASHTABLE_LOOKUP_INFER_H diff --git a/mindspore/lite/nnacl/infer/infer.h b/mindspore/lite/nnacl/infer/infer.h deleted file mode 100644 index ba89bedbae..0000000000 --- a/mindspore/lite/nnacl/infer/infer.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INFER_INFER_H_ -#define MINDSPORE_LITE_NNACL_INFER_INFER_H_ - -#include "nnacl/tensor_c.h" -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -typedef int (*InferShape)(const TensorC *const *inputs, size_t input_size, TensorC **outputs, size_t output_size, - OpParameter *parameter); - -InferShape GetInferFunc(int prim_type); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INFER_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/infer_register.h b/mindspore/lite/nnacl/infer/infer_register.h deleted file mode 100644 index 8d66b12b28..0000000000 --- a/mindspore/lite/nnacl/infer/infer_register.h +++ /dev/null @@ -1,233 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INFER_INFER_REGISTER_H_ -#define MINDSPORE_LITE_NNACL_INFER_INFER_REGISTER_H_ - -#include "nnacl/tensor_c.h" -#include "nnacl/op_base.h" -#include "nnacl/infer/infer.h" - -#ifdef __cplusplus -extern "C" { -#endif -enum PrimType { - PrimType_NONE = 0, - PrimType_Abs = 1, - PrimType_Activation = 2, - PrimType_ActivationGrad = 3, - PrimType_Adam = 4, - PrimType_AddFusion = 5, - PrimType_AdderFusion = 6, - PrimType_AddGrad = 7, - PrimType_AddN = 8, - PrimType_All = 9, - PrimType_ApplyMomentum = 10, - PrimType_ArgMaxFusion = 11, - PrimType_ArgMinFusion = 12, - PrimType_Assert = 13, - PrimType_Assign = 14, - PrimType_AssignAdd = 15, - PrimType_AudioSpectrogram = 16, - PrimType_AvgPoolFusion = 17, - PrimType_AvgPoolGrad = 18, - PrimType_BatchNorm = 19, - PrimType_BatchNormGrad = 20, - PrimType_BatchToSpace = 21, - PrimType_BatchToSpaceND = 22, - PrimType_BiasAdd = 23, - PrimType_BinaryCrossEntropy = 24, - PrimType_BinaryCrossEntropyGrad = 25, - PrimType_BiasAddGrad = 26, - PrimType_BroadcastTo = 27, - PrimType_Cast = 28, - PrimType_Ceil = 29, - PrimType_Clip = 30, - PrimType_Concat = 31, - PrimType_ControlDepend = 32, - PrimType_Conv2DBackpropFilterFusion = 33, - PrimType_Conv2DBackpropInputFusion = 34, - PrimType_Conv2DFusion = 35, - PrimType_Conv2dTransposeFusion = 36, - PrimType_Cos = 37, - PrimType_ConstantOfShape = 38, - PrimType_Crop = 39, - PrimType_CustomExtractFeatures = 40, - PrimType_CustomNormalize = 41, - PrimType_CustomPredict = 42, - PrimType_DeConv2DGradFilter = 43, - PrimType_Depend = 44, - PrimType_DepthToSpace = 45, - PrimType_DetectionPostProcess = 46, - PrimType_DivFusion = 47, - PrimType_DivGrad = 48, - PrimType_Dropout = 49, - PrimType_DropoutGrad = 50, - PrimType_Elu = 51, - PrimType_Eltwise = 52, - PrimType_Equal = 53, - PrimType_EmbeddingLookupFusion = 54, - PrimType_ExpFusion = 55, - PrimType_ExpandDims = 56, - PrimType_FakeQuantWithMinMaxVars = 57, - PrimType_FakeQuantWithMinMaxVarsPerChannel = 58, - 
PrimType_FftReal = 59, - PrimType_FftImag = 60, - PrimType_Flatten = 61, - PrimType_FlattenGrad = 62, - PrimType_Floor = 63, - PrimType_FloorDiv = 64, - PrimType_FloorMod = 65, - PrimType_Fill = 66, - PrimType_FullConnection = 67, - PrimType_FusedBatchNorm = 68, - PrimType_Gather = 69, - PrimType_GatherNd = 70, - PrimType_Greater = 71, - PrimType_GreaterEqual = 72, - PrimType_HashtableLookup = 73, - PrimType_InstanceNorm = 74, - PrimType_LayerNormFusion = 75, - PrimType_LeakyRelu = 76, - PrimType_Less = 77, - PrimType_LessEqual = 78, - PrimType_Log = 79, - PrimType_LogGrad = 80, - PrimType_LogicalAnd = 81, - PrimType_LogicalNot = 82, - PrimType_LogicalOr = 83, - PrimType_LpNormalization = 84, - PrimType_LRN = 85, - PrimType_LshProjection = 86, - PrimType_LSTM = 87, - PrimType_L2NormalizeFusion = 88, - PrimType_MatMul = 89, - PrimType_Maximum = 90, - PrimType_MaximumGrad = 91, - PrimType_MaxPoolFusion = 92, - PrimType_MaxPoolGrad = 93, - PrimType_Merge = 94, - PrimType_Mfcc = 95, - PrimType_Minimum = 96, - PrimType_MinimumGrad = 97, - PrimType_Mod = 98, - PrimType_MulFusion = 99, - PrimType_MulGrad = 100, - PrimType_Neg = 101, - PrimType_NegGrad = 102, - PrimType_NotEqual = 103, - PrimType_NonMaxSuppression = 104, - PrimType_OneHot = 105, - PrimType_OnesLike = 106, - PrimType_PadFusion = 107, - PrimType_PartialFusion = 108, - PrimType_PowerGrad = 109, - PrimType_PowFusion = 110, - PrimType_PriorBox = 111, - PrimType_PReLUFusion = 112, - PrimType_QuantDTypeCast = 113, - PrimType_Rank = 114, - PrimType_Range = 115, - PrimType_Reciprocal = 116, - PrimType_RealDiv = 117, - PrimType_ReduceFusion = 118, - PrimType_Reshape = 119, - PrimType_Resize = 120, - PrimType_ReverseSequence = 121, - PrimType_ReverseV2 = 122, - PrimType_Rfft = 123, - PrimType_ROIPooling = 124, - PrimType_Round = 125, - PrimType_Rsqrt = 126, - PrimType_ScaleFusion = 127, - PrimType_ScatterNd = 128, - PrimType_SGD = 129, - PrimType_Shape = 130, - PrimType_SigmoidCrossEntropyWithLogits = 131, - 
PrimType_SigmoidCrossEntropyWithLogitsGrad = 132, - PrimType_Sin = 133, - PrimType_SkipGram = 134, - PrimType_SliceFusion = 135, - PrimType_SmoothL1Loss = 136, - PrimType_SmoothL1LossGrad = 137, - PrimType_Softmax = 138, - PrimType_SoftmaxCrossEntropyWithLogits = 139, - PrimType_SpaceToBatch = 140, - PrimType_SpaceToBatchND = 141, - PrimType_SpaceToDepth = 142, - PrimType_SparseSoftmaxCrossEntropyWithLogits = 143, - PrimType_SparseToDense = 144, - PrimType_Split = 145, - PrimType_Sqrt = 146, - PrimType_Squeeze = 147, - PrimType_Square = 148, - PrimType_SquaredDifference = 149, - PrimType_Stack = 150, - PrimType_StridedSlice = 151, - PrimType_SubFusion = 152, - PrimType_SubGrad = 153, - PrimType_Switch = 154, - PrimType_TensorListFromTensor = 155, - PrimType_TensorListGetItem = 156, - PrimType_TensorListReserve = 157, - PrimType_TensorListSetItem = 158, - PrimType_TensorListStack = 159, - PrimType_TileFusion = 160, - PrimType_TopKFusion = 161, - PrimType_Transpose = 162, - PrimType_Unique = 163, - PrimType_UnsortedSegmentSum = 164, - PrimType_Unsqueeze = 165, - PrimType_Unstack = 166, - PrimType_While = 167, - PrimType_Where = 168, - PrimType_ZerosLike = 169, - PrimType_Select = 170, - PrimType_If = 171, - PrimType_GRU = 172, - PrimType_NonZero = 173, - PrimType_InvertPermutation = 174, - PrimType_Size = 175, - PrimType_RandomStandardNormal = 176, - PrimType_CropAndResize = 177, - PrimType_Erf = 178, - PrimType_StridedSliceGrad = 179, - PrimType_IsFinite = 180, - PrimType_LinSpace = 181, - PrimType_UniformReal = 182, - PrimType_AbsGrad = 183, - PrimType_RsqrtGrad = 184, - PrimType_SqrtGrad = 185, - PrimType_LayerNormGrad = 186, - PrimType_ResizeGrad = 187, - PrimType_Splice = 188, - PrimType_LogSoftmax = 189, - PrimType_MIN = PrimType_NONE, - PrimType_MAX = PrimType_LogSoftmax + 1 -}; - -void RegInfer(int prim_type, InferShape func); - -#ifdef MS_COMPILE_IOS -#define REG_INFER(op, type, func) \ - void _##op##type() { RegInfer(type, func); } -#else -#define 
REG_INFER(op, type, func) \ - __attribute__((constructor(102))) void Reg##op##Infer() { RegInfer(type, func); } -#endif -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INFER_INFER_REGISTER_H_ diff --git a/mindspore/lite/nnacl/infer/invert_permutation_infer.h b/mindspore/lite/nnacl/infer/invert_permutation_infer.h deleted file mode 100644 index fb2f71a9bb..0000000000 --- a/mindspore/lite/nnacl/infer/invert_permutation_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_INFER_H -#define MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int InvertPermutationInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_INFER_H diff --git a/mindspore/lite/nnacl/infer/layer_norm_grad_infer.h b/mindspore/lite/nnacl/infer/layer_norm_grad_infer.h deleted file mode 100644 index 0e61a1c86c..0000000000 --- a/mindspore/lite/nnacl/infer/layer_norm_grad_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_INFER_LAYER_NORM_GRAD_INFER_H_ -#define MINDSPORE_LITE_NNACL_INFER_LAYER_NORM_GRAD_INFER_H_ - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int LayerNormGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INFER_LAYER_NORM_GRAD_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/layer_norm_infer.h b/mindspore/lite/nnacl/infer/layer_norm_infer.h deleted file mode 100644 index bbc87f7db6..0000000000 --- a/mindspore/lite/nnacl/infer/layer_norm_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_LAYER_NORM_INFER_H -#define MINDSPORE_LITE_NNACL_LAYER_NORM_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/layer_norm_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int LayerNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_LAYER_NORM_INFER_H diff --git a/mindspore/lite/nnacl/infer/lin_space_infer.h b/mindspore/lite/nnacl/infer/lin_space_infer.h deleted file mode 100644 index 0568040914..0000000000 --- a/mindspore/lite/nnacl/infer/lin_space_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_LIN_SPACE_INFER_H -#define MINDSPORE_LITE_NNACL_LIN_SPACE_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int LinSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_LIN_SPACE_INFER_H diff --git a/mindspore/lite/nnacl/infer/lsh_projection_infer.h b/mindspore/lite/nnacl/infer/lsh_projection_infer.h deleted file mode 100644 index ffba1443f8..0000000000 --- a/mindspore/lite/nnacl/infer/lsh_projection_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_LSH_PROJECTION_INFER_H -#define MINDSPORE_LITE_NNACL_LSH_PROJECTION_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/lsh_projection_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int LshProjectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_LSH_PROJECTION_INFER_H diff --git a/mindspore/lite/nnacl/infer/lstm_infer.h b/mindspore/lite/nnacl/infer/lstm_infer.h deleted file mode 100644 index ea51f01b28..0000000000 --- a/mindspore/lite/nnacl/infer/lstm_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_LSTM_INFER_H -#define MINDSPORE_LITE_NNACL_LSTM_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32/lstm_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int LstmInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_LSTM_INFER_H diff --git a/mindspore/lite/nnacl/infer/matmul_infer.h b/mindspore/lite/nnacl/infer/matmul_infer.h deleted file mode 100644 index 9091f4e0f4..0000000000 --- a/mindspore/lite/nnacl/infer/matmul_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_MATMUL_INFER_H -#define MINDSPORE_LITE_NNACL_MATMUL_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/matmul_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int MatmulInferShape(const TensorC *const *const inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_MATMUL_INFER_H diff --git a/mindspore/lite/nnacl/infer/max_min_grad_infer.h b/mindspore/lite/nnacl/infer/max_min_grad_infer.h deleted file mode 100644 index 91fc83ee4f..0000000000 --- a/mindspore/lite/nnacl/infer/max_min_grad_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INFER_MAX_MIN_GRAD_INFER_H_ -#define MINDSPORE_LITE_NNACL_INFER_MAX_MIN_GRAD_INFER_H_ - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int MaxMinGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INFER_MAX_MIN_GRAD_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/mean_infer.h b/mindspore/lite/nnacl/infer/mean_infer.h deleted file mode 100644 index ab83182eb8..0000000000 --- a/mindspore/lite/nnacl/infer/mean_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_MEAN_INFER_H -#define MINDSPORE_LITE_NNACL_MEAN_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/reduce_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int MeanInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_MEAN_INFER_H diff --git a/mindspore/lite/nnacl/infer/merge_infer.h b/mindspore/lite/nnacl/infer/merge_infer.h deleted file mode 100644 index 1437e5439b..0000000000 --- a/mindspore/lite/nnacl/infer/merge_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_MERGE_INFER_H -#define MINDSPORE_LITE_NNACL_MERGE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/softmax_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int MergeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_MERGE_INFER_H diff --git a/mindspore/lite/nnacl/infer/mfcc_infer.h b/mindspore/lite/nnacl/infer/mfcc_infer.h deleted file mode 100644 index 358deb46a9..0000000000 --- a/mindspore/lite/nnacl/infer/mfcc_infer.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_MFCC_INFER_H -#define MINDSPORE_LITE_NNACL_MFCC_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct MfccParameter { - OpParameter op_parameter_; - int dct_coeff_num_; -} MfccParameter; - -int MfccInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_MFCC_INFER_H diff --git a/mindspore/lite/nnacl/infer/non_max_suppression_infer.h b/mindspore/lite/nnacl/infer/non_max_suppression_infer.h deleted file mode 100644 index bb0cc24d1a..0000000000 --- a/mindspore/lite/nnacl/infer/non_max_suppression_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_INFER_H -#define MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int NonMaxSuppressionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_INFER_H diff --git a/mindspore/lite/nnacl/infer/one_hot_infer.h b/mindspore/lite/nnacl/infer/one_hot_infer.h deleted file mode 100644 index 3e0305e158..0000000000 --- a/mindspore/lite/nnacl/infer/one_hot_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ONE_HOT_INFER_H -#define MINDSPORE_LITE_NNACL_ONE_HOT_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32/one_hot_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int OneHotInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ONE_HOT_INFER_H diff --git a/mindspore/lite/nnacl/infer/pad_infer.h b/mindspore/lite/nnacl/infer/pad_infer.h deleted file mode 100644 index b97bea4b52..0000000000 --- a/mindspore/lite/nnacl/infer/pad_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_PAD_INFER_H -#define MINDSPORE_LITE_NNACL_PAD_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/pad_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int PadInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_PAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/partial_infer.h b/mindspore/lite/nnacl/infer/partial_infer.h deleted file mode 100644 index 7d9adbe8ca..0000000000 --- a/mindspore/lite/nnacl/infer/partial_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_PARTIAL_INFER_H -#define MINDSPORE_LITE_NNACL_PARTIAL_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/softmax_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int PartialInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_PARTIAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/pooling_grad_infer.h b/mindspore/lite/nnacl/infer/pooling_grad_infer.h deleted file mode 100644 index d8104f35e8..0000000000 --- a/mindspore/lite/nnacl/infer/pooling_grad_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_POOLING_GRAD_INFER_H -#define MINDSPORE_LITE_NNACL_POOLING_GRAD_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/pooling_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int PoolingGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_POOLING_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/pooling_infer.h b/mindspore/lite/nnacl/infer/pooling_infer.h deleted file mode 100644 index 1f30eeaebb..0000000000 --- a/mindspore/lite/nnacl/infer/pooling_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_POOLING_INFER_H -#define MINDSPORE_LITE_NNACL_POOLING_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/pooling_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int PoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_POOLING_INFER_H diff --git a/mindspore/lite/nnacl/infer/power_infer.h b/mindspore/lite/nnacl/infer/power_infer.h deleted file mode 100644 index 10823b6ee4..0000000000 --- a/mindspore/lite/nnacl/infer/power_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_POWER_INFER_H -#define MINDSPORE_LITE_NNACL_POWER_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/power_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int PowerInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_POWER_INFER_H diff --git a/mindspore/lite/nnacl/infer/prior_box_infer.h b/mindspore/lite/nnacl/infer/prior_box_infer.h deleted file mode 100644 index 1803485263..0000000000 --- a/mindspore/lite/nnacl/infer/prior_box_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_PRIOR_BOX_INFER_H -#define MINDSPORE_LITE_NNACL_PRIOR_BOX_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/prior_box_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_PRIOR_BOX_INFER_H diff --git a/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h b/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h deleted file mode 100644 index 8357fec315..0000000000 --- a/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_QUANT_DTYPE_CAST_INFER_H -#define MINDSPORE_LITE_NNACL_QUANT_DTYPE_CAST_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct QuantDtypeCastParameter { - OpParameter op_parameter_; - int srcT_; // deprecated - int dstT_; -} QuantDtypeCastParameter; - -int QuantDtypeCastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_QUANT_DTYPE_CAST_INFER_H diff --git a/mindspore/lite/nnacl/infer/random_standard_normal_infer.h b/mindspore/lite/nnacl/infer/random_standard_normal_infer.h deleted file mode 100644 index d91ba863ce..0000000000 --- a/mindspore/lite/nnacl/infer/random_standard_normal_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_RANDOM_STANDARD_NORMAL_INFER_H -#define MINDSPORE_LITE_NNACL_RANDOM_STANDARD_NORMAL_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int RandomStandardNormalInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_RANDOM_STANDARD_NORMAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/range_infer.h b/mindspore/lite/nnacl/infer/range_infer.h deleted file mode 100644 index c52e8cc406..0000000000 --- a/mindspore/lite/nnacl/infer/range_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_RANGE_INFER_H -#define MINDSPORE_LITE_NNACL_RANGE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32/range_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_RANGE_INFER_H diff --git a/mindspore/lite/nnacl/infer/rank_infer.h b/mindspore/lite/nnacl/infer/rank_infer.h deleted file mode 100644 index ce162ed35b..0000000000 --- a/mindspore/lite/nnacl/infer/rank_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_RANK_INFER_H -#define MINDSPORE_LITE_NNACL_RANK_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int RankInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_RANK_INFER_H diff --git a/mindspore/lite/nnacl/infer/reduce_infer.h b/mindspore/lite/nnacl/infer/reduce_infer.h deleted file mode 100644 index 8bec1eb2ba..0000000000 --- a/mindspore/lite/nnacl/infer/reduce_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_REDUCE_INFER_H -#define MINDSPORE_LITE_NNACL_REDUCE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/reduce_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ReduceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_REDUCE_INFER_H diff --git a/mindspore/lite/nnacl/infer/reshape_infer.h b/mindspore/lite/nnacl/infer/reshape_infer.h deleted file mode 100644 index adc01b9dac..0000000000 --- a/mindspore/lite/nnacl/infer/reshape_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_RESHAPE_INFER_H -#define MINDSPORE_LITE_NNACL_RESHAPE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/reshape_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ReshapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_RESHAPE_INFER_H diff --git a/mindspore/lite/nnacl/infer/resize_infer.h b/mindspore/lite/nnacl/infer/resize_infer.h deleted file mode 100644 index 50ad390ab6..0000000000 --- a/mindspore/lite/nnacl/infer/resize_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_RESIZE_INFER_H -#define MINDSPORE_LITE_NNACL_RESIZE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/resize_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_RESIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/rfft_infer.h b/mindspore/lite/nnacl/infer/rfft_infer.h deleted file mode 100644 index c430cb342b..0000000000 --- a/mindspore/lite/nnacl/infer/rfft_infer.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_RFFT_INFER_H -#define MINDSPORE_LITE_NNACL_RFFT_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct RfftParameter { - OpParameter op_parameter_; - int fft_length_; -} RfftParameter; - -int RfftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_RFFT_INFER_H diff --git a/mindspore/lite/nnacl/infer/roi_pooling_infer.h b/mindspore/lite/nnacl/infer/roi_pooling_infer.h deleted file mode 100644 index 7fb99468c0..0000000000 --- a/mindspore/lite/nnacl/infer/roi_pooling_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_ROI_POOLING_INFER_H -#define MINDSPORE_LITE_NNACL_ROI_POOLING_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32/roi_pooling_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ROIPoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_ROI_POOLING_INFER_H diff --git a/mindspore/lite/nnacl/infer/scatter_nd_infer.h b/mindspore/lite/nnacl/infer/scatter_nd_infer.h deleted file mode 100644 index 5ee5acdaad..0000000000 --- a/mindspore/lite/nnacl/infer/scatter_nd_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SCATTER_ND_INFER_H -#define MINDSPORE_LITE_NNACL_SCATTER_ND_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/softmax_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ScatterNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SCATTER_ND_INFER_H diff --git a/mindspore/lite/nnacl/infer/select_infer.h b/mindspore/lite/nnacl/infer/select_infer.h deleted file mode 100644 index 1b95ebf830..0000000000 --- a/mindspore/lite/nnacl/infer/select_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SELECT_INFER_H -#define MINDSPORE_LITE_NNACL_SELECT_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SELECT_INFER_H diff --git a/mindspore/lite/nnacl/infer/sgd_infer.h b/mindspore/lite/nnacl/infer/sgd_infer.h deleted file mode 100644 index 8d47efdcda..0000000000 --- a/mindspore/lite/nnacl/infer/sgd_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SGD_INFER_H -#define MINDSPORE_LITE_NNACL_SGD_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SgdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SGD_INFER_H diff --git a/mindspore/lite/nnacl/infer/shape_infer.h b/mindspore/lite/nnacl/infer/shape_infer.h deleted file mode 100644 index 30be218bc6..0000000000 --- a/mindspore/lite/nnacl/infer/shape_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SHAPE_INFER_H -#define MINDSPORE_LITE_NNACL_SHAPE_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SHAPE_INFER_H diff --git a/mindspore/lite/nnacl/infer/size_infer.h b/mindspore/lite/nnacl/infer/size_infer.h deleted file mode 100644 index b299c0e1ad..0000000000 --- a/mindspore/lite/nnacl/infer/size_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SIZE_INFER_H -#define MINDSPORE_LITE_NNACL_SIZE_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/skip_gram_infer.h b/mindspore/lite/nnacl/infer/skip_gram_infer.h deleted file mode 100644 index 6b54fc1c9a..0000000000 --- a/mindspore/lite/nnacl/infer/skip_gram_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SKIP_GRAM_INFER_H -#define MINDSPORE_LITE_NNACL_SKIP_GRAM_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SkipGramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SKIP_GRAM_INFER_H diff --git a/mindspore/lite/nnacl/infer/slice_infer.h b/mindspore/lite/nnacl/infer/slice_infer.h deleted file mode 100644 index 0aa3f79ce3..0000000000 --- a/mindspore/lite/nnacl/infer/slice_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SLICE_INFER_H -#define MINDSPORE_LITE_NNACL_SLICE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/slice_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SLICE_INFER_H diff --git a/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h b/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h deleted file mode 100644 index b66aa8d7ef..0000000000 --- a/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SOFTMAX_CROSS_ENTROPY_INFER_H -#define MINDSPORE_LITE_NNACL_SOFTMAX_CROSS_ENTROPY_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SoftmaxCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SOFTMAX_ENTROPY_INFER_H diff --git a/mindspore/lite/nnacl/infer/softmax_infer.h b/mindspore/lite/nnacl/infer/softmax_infer.h deleted file mode 100644 index ba22743fea..0000000000 --- a/mindspore/lite/nnacl/infer/softmax_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SOFTMAX_INFER_H -#define MINDSPORE_LITE_NNACL_SOFTMAX_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/softmax_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SoftMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SOFTMAX_INFER_H diff --git a/mindspore/lite/nnacl/infer/space_to_batch_infer.h b/mindspore/lite/nnacl/infer/space_to_batch_infer.h deleted file mode 100644 index e6e8743222..0000000000 --- a/mindspore/lite/nnacl/infer/space_to_batch_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_INFER_H -#define MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32/space_to_batch_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SpaceToBatchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_INFER_H diff --git a/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h b/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h deleted file mode 100644 index c8bc25e2c4..0000000000 --- a/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_ND_INFER_H -#define MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_ND_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32/space_to_batch_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SpaceToBatchNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_ND_INFER_H diff --git a/mindspore/lite/nnacl/infer/space_to_depth_infer.h b/mindspore/lite/nnacl/infer/space_to_depth_infer.h deleted file mode 100644 index 65dfefdd2e..0000000000 --- a/mindspore/lite/nnacl/infer/space_to_depth_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SPACE_TO_DEPTH_INFER_H -#define MINDSPORE_LITE_NNACL_SPACE_TO_DEPTH_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/space_to_depth_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SpaceToDepthInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SPACE_TO_DEPTH_INFER_H diff --git a/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h b/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h deleted file mode 100644 index 0908724ee5..0000000000 --- a/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INFER_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H_ -#define MINDSPORE_LITE_NNACL_INFER_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H_ - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SparseSoftmaxCrossEntropyWithLogitsInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INFER_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/sparse_to_dense_infer.h b/mindspore/lite/nnacl/infer/sparse_to_dense_infer.h deleted file mode 100644 index 1e274247e2..0000000000 --- a/mindspore/lite/nnacl/infer/sparse_to_dense_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SPACE_TO_DENSE_INFER_H -#define MINDSPORE_LITE_NNACL_SPACE_TO_DENSE_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SparseToDenseInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SPACE_TO_DENSE_INFER_H diff --git a/mindspore/lite/nnacl/infer/splice_infer.h b/mindspore/lite/nnacl/infer/splice_infer.h deleted file mode 100644 index bb7584cc19..0000000000 --- a/mindspore/lite/nnacl/infer/splice_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INFER_SPLICE_INFER_H_ -#define MINDSPORE_LITE_NNACL_INFER_SPLICE_INFER_H_ -#include "nnacl/infer/common_infer.h" -#include "nnacl/splice_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SpliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INFER_SPLICE_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/split_infer.h b/mindspore/lite/nnacl/infer/split_infer.h deleted file mode 100644 index 7745fd26cb..0000000000 --- a/mindspore/lite/nnacl/infer/split_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SPLIT_INFER_H -#define MINDSPORE_LITE_NNACL_SPLIT_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/split_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SplitInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SPLIT_INFER_H diff --git a/mindspore/lite/nnacl/infer/squeeze_infer.h b/mindspore/lite/nnacl/infer/squeeze_infer.h deleted file mode 100644 index 9b7409ab28..0000000000 --- a/mindspore/lite/nnacl/infer/squeeze_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SQUEEZE_INFER_H -#define MINDSPORE_LITE_NNACL_SQUEEZE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/squeeze_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SQUEEZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/stack_infer.h b/mindspore/lite/nnacl/infer/stack_infer.h deleted file mode 100644 index 40e47158e5..0000000000 --- a/mindspore/lite/nnacl/infer/stack_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_STACK_INFER_H -#define MINDSPORE_LITE_NNACL_STACK_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/stack_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int StackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_STACK_INFER_H diff --git a/mindspore/lite/nnacl/infer/strided_slice_grad_infer.h b/mindspore/lite/nnacl/infer/strided_slice_grad_infer.h deleted file mode 100644 index 9e4ed8ea56..0000000000 --- a/mindspore/lite/nnacl/infer/strided_slice_grad_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_STRIDED_SLICE_GRAD_INFER_H -#define MINDSPORE_LITE_NNACL_STRIDED_SLICE_GRAD_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/strided_slice_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int StridedSliceGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_STRIDED_SLICE_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/strided_slice_infer.h b/mindspore/lite/nnacl/infer/strided_slice_infer.h deleted file mode 100644 index 1c9792eb43..0000000000 --- a/mindspore/lite/nnacl/infer/strided_slice_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_STRIDED_SLICE_INFER_H -#define MINDSPORE_LITE_NNACL_STRIDED_SLICE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/strided_slice_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_STRIDED_SLICE_INFER_H diff --git a/mindspore/lite/nnacl/infer/switch_infer.h b/mindspore/lite/nnacl/infer/switch_infer.h deleted file mode 100644 index 673d1efa63..0000000000 --- a/mindspore/lite/nnacl/infer/switch_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_SWITCH_INFER_H -#define MINDSPORE_LITE_NNACL_SWITCH_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/softmax_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_SWITCH_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h b/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h deleted file mode 100644 index 9ac106cc22..0000000000 --- a/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_FROMTENSOR_INFER_H -#define MINDSPORE_LITE_NNACL_TENSORLIST_FROMTENSOR_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_TENSORLIST_FROMTENSOR_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h b/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h deleted file mode 100644 index 663a626a04..0000000000 --- a/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_GETITEM_INFER_H -#define MINDSPORE_LITE_NNACL_TENSORLIST_GETITEM_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/tensorlist_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_TENSORLIST_GETITEM_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h b/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h deleted file mode 100644 index 4cd2c453e2..0000000000 --- a/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_RESERVE_INFER_H -#define MINDSPORE_LITE_NNACL_TENSORLIST_RESERVE_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_TENSORLIST_RESERVE_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h b/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h deleted file mode 100644 index d7b6b20d10..0000000000 --- a/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_SETITEM_INFER_H -#define MINDSPORE_LITE_NNACL_TENSORLIST_SETITEM_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_TENSORLIST_SETITEM_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_stack_infer.h b/mindspore/lite/nnacl/infer/tensorlist_stack_infer.h deleted file mode 100644 index 38d6ce0cfd..0000000000 --- a/mindspore/lite/nnacl/infer/tensorlist_stack_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_STACK_INFER_H -#define MINDSPORE_LITE_NNACL_TENSORLIST_STACK_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_TENSORLIST_STACK_INFER_H diff --git a/mindspore/lite/nnacl/infer/tile_infer.h b/mindspore/lite/nnacl/infer/tile_infer.h deleted file mode 100644 index f5200949da..0000000000 --- a/mindspore/lite/nnacl/infer/tile_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_TILE_INFER_H -#define MINDSPORE_LITE_NNACL_TILE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/base/tile_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_TILE_INFER_H diff --git a/mindspore/lite/nnacl/infer/topk_infer.h b/mindspore/lite/nnacl/infer/topk_infer.h deleted file mode 100644 index 791cabdf8f..0000000000 --- a/mindspore/lite/nnacl/infer/topk_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_TOPK_INFER_H -#define MINDSPORE_LITE_NNACL_TOPK_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/fp32/topk_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int TopKInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_TOPK_INFER_H diff --git a/mindspore/lite/nnacl/infer/transpose_infer.h b/mindspore/lite/nnacl/infer/transpose_infer.h deleted file mode 100644 index 4a8cb4aec8..0000000000 --- a/mindspore/lite/nnacl/infer/transpose_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_TRANSPOSE_INFER_H -#define MINDSPORE_LITE_NNACL_TRANSPOSE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/transpose.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int TransposeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_TRANSPOSE_INFER_H diff --git a/mindspore/lite/nnacl/infer/uniform_real_infer.h b/mindspore/lite/nnacl/infer/uniform_real_infer.h deleted file mode 100644 index ceef8dec71..0000000000 --- a/mindspore/lite/nnacl/infer/uniform_real_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_UNIFORM_REAL_INFER_H -#define MINDSPORE_LITE_NNACL_UNIFORM_REAL_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int UniformRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_UNIFORM_REAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/unique_infer.h b/mindspore/lite/nnacl/infer/unique_infer.h deleted file mode 100644 index ec8b8d434d..0000000000 --- a/mindspore/lite/nnacl/infer/unique_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_UNIQUE_INFER_H -#define MINDSPORE_LITE_NNACL_UNIQUE_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int UniqueInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_UNIQUE_INFER_H diff --git a/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h b/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h deleted file mode 100644 index 3945e2907b..0000000000 --- a/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_UNSORTED_SEGMENT_SUM_INFER_H -#define MINDSPORE_LITE_NNACL_UNSORTED_SEGMENT_SUM_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct UnsortedSegmentSumParameter { - OpParameter op_parameter_; - int segments_num_; -} UnsortedSegmentSumParameter; - -int UnsortedSegmentSumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, - size_t outputs_size, OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_UNSORTED_SEGMENT_SUM_INFER_H diff --git a/mindspore/lite/nnacl/infer/unsqueeze_infer.h b/mindspore/lite/nnacl/infer/unsqueeze_infer.h deleted file mode 100644 index 72db2bcc19..0000000000 --- a/mindspore/lite/nnacl/infer/unsqueeze_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_UNSQUEEZE_INFER_H -#define MINDSPORE_LITE_NNACL_UNSQUEEZE_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/unsqueeze_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int UnsqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_UNSQUEEZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/unstack_infer.h b/mindspore/lite/nnacl/infer/unstack_infer.h deleted file mode 100644 index 787e369f01..0000000000 --- a/mindspore/lite/nnacl/infer/unstack_infer.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_UNSTACK_INFER_H -#define MINDSPORE_LITE_NNACL_UNSTACK_INFER_H - -#include "nnacl/infer/common_infer.h" -#include "nnacl/unstack_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int UnstackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_UNSTACK_INFER_H diff --git a/mindspore/lite/nnacl/infer/where_infer.h b/mindspore/lite/nnacl/infer/where_infer.h deleted file mode 100644 index 182a8b45ce..0000000000 --- a/mindspore/lite/nnacl/infer/where_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_WHERE_INFER_H -#define MINDSPORE_LITE_NNACL_WHERE_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int WhereInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_WHERE_INFER_H diff --git a/mindspore/lite/nnacl/infer/while_infer.h b/mindspore/lite/nnacl/infer/while_infer.h deleted file mode 100644 index 10616d5b19..0000000000 --- a/mindspore/lite/nnacl/infer/while_infer.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_WHILE_INFER_H -#define MINDSPORE_LITE_NNACL_WHILE_INFER_H - -#include "nnacl/infer/common_infer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int WhileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, - OpParameter *parameter); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_WHILE_INFER_H diff --git a/mindspore/lite/nnacl/instance_norm_parameter.h b/mindspore/lite/nnacl/instance_norm_parameter.h deleted file mode 100644 index 988561a3e6..0000000000 --- a/mindspore/lite/nnacl/instance_norm_parameter.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INSTANCE_NORM_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_INSTANCE_NORM_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct InstanceNormParameter { - // Primitive parameter - OpParameter op_parameter_; - float epsilon_; - // shape correlative - int batch_; - int channel_; - int inner_size_; -} InstanceNormParameter; - -#endif // MINDSPORE_LITE_NNACL_INSTANCE_NORM_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/int8/add_int8.h b/mindspore/lite/nnacl/int8/add_int8.h deleted file mode 100644 index 8663d79a67..0000000000 --- a/mindspore/lite/nnacl/int8/add_int8.h +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_ADD_INT8_H_ -#define MINDSPORE_LITE_NNACL_ADD_INT8_H_ - -#ifdef ENABLE_AVX -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/arithmetic.h" -#include "nnacl/int8/arithmetic_int8.h" - -typedef struct AddQuantQrgs { - int32_t zp_; - int32_t left_shift_; - int32_t right_shift_; - int32_t multiplier_; -} AddQuantQrgs; - -typedef struct AddQuantParameter { - int left_shift_; - int32_t min_; - int32_t max_; - - AddQuantQrgs in0_args_; - AddQuantQrgs in1_args_; - - int32_t out_zp_; - int32_t out_left_shift_; - int32_t out_right_shift_; - int32_t out_multiplier_; -} AddQuantParameter; - -#ifdef __cplusplus -extern "C" { -#endif - -void AddInt8(const int8_t *input0, const int8_t *input1, int8_t *output, int size, AddQuantParameter *params); - -void AddOptInt8(const int8_t *ptr_in, const int8_t element_in, int8_t *output, int size, AddQuantParameter *params, - AddQuantQrgs *ptr_args, AddQuantQrgs *ele_args); - -int ElementAddInt8(const int8_t *in0, const int8_t *in1, int8_t *out, int size); - -int BroadcastAddInt8(const int8_t *in0, const int8_t *in1, int8_t *tile_in0, int8_t *tile_in1, int8_t *out, int size, - ArithmeticParameter *param); - -#ifdef ENABLE_AVX -void AddInt8_AVX2(const int8_t *input0, const int8_t *input1, int8_t *output, int size, AddQuantParameter *params); - -void AddOptInt8_AVX2(const int8_t *ptr_in, const int8_t element_in, int8_t *output, int size, AddQuantParameter *params, - AddQuantQrgs *ptr_args, AddQuantQrgs *ele_args); -#endif -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_ADD_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/arg_min_max_int8.h b/mindspore/lite/nnacl/int8/arg_min_max_int8.h deleted file mode 100644 index 26854cdd34..0000000000 --- a/mindspore/lite/nnacl/int8/arg_min_max_int8.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you 
may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_INT8_ARG_MIN_MAX_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_ARG_MIN_MAX_INT8_H_ - -#include "nnacl/arg_min_max_parameter.h" -#include "nnacl/int8/quantize.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void Int8ArgMinMaxQuant(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, - QuantArg *in_quant, QuantArg *out_quant); -void Int8ArgMinMaxDim0(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, - QuantArg *in_quant, QuantArg *out_quant); -void Int8ArgMinMaxDim1(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, - QuantArg *in_quant, QuantArg *out_quant); -void Int8ArgMinMaxDim2(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, - QuantArg *in_quant, QuantArg *out_quant); -void Int8ArgMinMaxDim3(const int8_t *input, int8_t *output, const int *in_shape, ArgMinMaxParameter *param, - QuantArg *in_quant, QuantArg *out_quant); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_ARG_MIN_MAX_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/arithmetic_int8.h b/mindspore/lite/nnacl/int8/arithmetic_int8.h deleted file mode 100644 index ec9d5c6fa1..0000000000 --- a/mindspore/lite/nnacl/int8/arithmetic_int8.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance 
with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" -#include "nnacl/base/arithmetic_base.h" - -#ifdef __cplusplus -extern "C" { -#endif -void TileOneDimensionInt8(const int8_t *inData, int8_t *outData, int dim, size_t ndim, const int *inShape, - const int *inStrides, const int *outStrides, const int *multiple); -void TileDimensionsInt8(const int8_t *data0, const int8_t *data1, int8_t *tile_data0, int8_t *tile_data1, - ArithmeticParameter *param); - -int ElementNotEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, - ArithmeticQuantArg *quant_arg); - -int ElementEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, ArithmeticQuantArg *quant_arg); - -int ElementLessInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, ArithmeticQuantArg *quant_arg); - -int ElementLessEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, - ArithmeticQuantArg *quant_arg); - -int ElementGreaterInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, - ArithmeticQuantArg *quant_arg); - -int ElementGreaterEqualInt8(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, - ArithmeticQuantArg *quant_arg); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/arithmetic_self_int8.h b/mindspore/lite/nnacl/int8/arithmetic_self_int8.h deleted file 
mode 100644 index 78ad1e0032..0000000000 --- a/mindspore/lite/nnacl/int8/arithmetic_self_int8.h +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_SELF_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_SELF_INT8_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/int8/quantize.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int Int8ElementRound(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementFloor(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementCeil(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementAbs(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementSin(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementCos(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementLog(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementSqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementRsqrt(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementSquare(int8_t *input, int8_t *output, int element_size, 
ArithSelfQuantArg para); - -int Int8ElementLogicalNot(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -int Int8ElementReciprocal(int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_ARITHMETIC_SELF_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/batch_to_space_int8.h b/mindspore/lite/nnacl/int8/batch_to_space_int8.h deleted file mode 100644 index a2e0a18cbc..0000000000 --- a/mindspore/lite/nnacl/int8/batch_to_space_int8.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INT8_BATCH_TO_SPACE_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_BATCH_TO_SPACE_INT8_H_ -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" - -#ifdef __cplusplus -extern "C" { -#endif -void BatchToSpaceNoCropForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, int out_n, - const int *block, QuantArg *in_quant_arg, QuantArg *out_quant_arg); -void BatchToSpaceForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, int out_n, const int *block, - const int *crops, QuantArg *in_quant_arg, QuantArg *out_quant_arg); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_BATCH_TO_SPACE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/batchnorm_int8.h b/mindspore/lite/nnacl/int8/batchnorm_int8.h deleted file mode 100644 index 95f59f0401..0000000000 --- a/mindspore/lite/nnacl/int8/batchnorm_int8.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_BATCHNORM_H_ -#define MINDSPORE_LITE_NNACL_INT8_BATCHNORM_H_ - -#include "nnacl/op_base.h" -#include "nnacl/batchnorm_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void BatchNormInt8(int8_t *output_ptr, const int8_t *input_ptr, const float *alpha_ptr, const float *beta_ptr, - int task_id, BatchNormParameter *param); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_BATCHNORM_H_ diff --git a/mindspore/lite/nnacl/int8/common_func_int8.h b/mindspore/lite/nnacl/int8/common_func_int8.h deleted file mode 100644 index ae0e0c5ced..0000000000 --- a/mindspore/lite/nnacl/int8/common_func_int8.h +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_COMMON_FUNC_H_ -#define MINDSPORE_LITE_NNACL_INT8_COMMON_FUNC_H_ - -#include -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void PostFuncInt8C4(const int32_t *in, const int32_t *bias, int8_t *out, size_t oc, size_t plane, size_t stride, - int32_t multiplier, int32_t left_shift, int32_t right_shift, int32_t zp, int32_t mini, - int32_t maxi); -#ifdef ENABLE_ARM -void ConvDwInt8Row(int32_t *output_ptr, const int8_t *input_ptr, const int16_t *weight_ptr, int num_pixels, - int output_channel, int input_step, int8_t input_zp); -void ConvDwInt8PostAlign4PerChannel(int8_t *dst, int32_t *buffer, int channel4, int32_t output_zp, - int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, int32_t acc_min, - int32_t acc_max); -void ConvDwInt8PostAlign4(int8_t *dst, int32_t *buffer, int num_pixels, int32_t output_zp, int32_t out_multiplier, - int32_t left_shift, int32_t right_shift, int32_t acc_min, int32_t acc_max); -void IndirectGemmInt16to32_8x4(int32_t *dst, const int16_t *src, const int16_t *weight, size_t ksize, size_t ic8, - size_t oc4, size_t offset); -void ConvDwInt8Center(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, size_t height, - size_t width, size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, - size_t in_sh_step, size_t in_sw_step, size_t in_kh_step, size_t in_kw_step, int8_t *in_zp, - int32_t *out_zp, int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, - int32_t *acc_min, int32_t *acc_max); -void DeconvDwInt8Center(int32_t *dst, const int16_t *src, const int16_t *weight, size_t height, size_t width, - size_t kernel_h, size_t kernel_w, size_t out_h_step, size_t block_channel, size_t in_sh_step, - size_t in_sw_step, size_t in_kh_step, size_t in_kw_step); -void DeconvDwInt8Post(int8_t *dst, int32_t *output_buffer, const int32_t *bias, int 
block_channel, int pixel_nums, - int out_multiplier, int left_shift, int right_shift, int32_t out_zp, int32_t acc_min, - int32_t acc_max); -int16x8_t LoadAndAddOffset(int8_t *data, int index, int offset); -int32x4_t ClacScaledInput(int32x4_t input, int32x4_t left_shift_result_vec, int32x4_t input_multiplier_vec, - int32x4_t right_shift_vec); -#endif - -#ifdef ENABLE_ARM32 -void ConvDw3x3Int8BorderPixel(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, int height, - int width, int in_kh_step, int in_kw_step, int channel, int8_t in_zp, int32_t out_zp, - int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, int32_t acc_min, - int32_t acc_max, size_t per_channel); -#endif - -#ifdef ENABLE_ARM64 -void PostFuncInt8C4Neon64(const int32_t *in, const int32_t *bias, int8_t *out, size_t oc4div, size_t oc4res, - size_t plane, size_t stride, int32_t multiplier, int32_t left_shift, int32_t right_shift, - int32_t zp, int32_t mini, int32_t maxi); -void ConvDw3x3Int8Neon64(int8_t *output, const int8_t *input, const int16_t *weight, const int32_t *bias, - int input_col_size, int input_row_size, int channel, int output_h, int output_w, int8_t in_zp, - int32_t out_zp, int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, - int32_t acc_min, int32_t acc_max, size_t per_channel); -void ConvDw3x3Int8Stride2(int8_t *output, const int8_t *input, const int16_t *weight, const int32_t *bias, - int input_col_size, int input_row_size, int channel, int output_h, int output_w, int8_t in_zp, - int32_t out_zp, int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, - int32_t acc_min, int32_t acc_max, size_t per_channel); -void ConvDw3x3Int8Corner(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, size_t in_kh_step, - size_t in_kw_step, size_t channel, size_t in_zp, size_t out_zp, int32_t *out_multiplier, - int32_t *left_shift, int32_t *right_shift, size_t acc_min, size_t acc_max, size_t per_channel); -void 
ConvDw3x3Int8Vertical(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, - size_t in_kh_step, size_t in_kw_step, size_t channel, size_t in_zp, size_t out_zp, - int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, size_t acc_min, - size_t acc_max, size_t per_channel); -void ConvDw3x3Int8Horizontal(int8_t *dst, const int8_t *src, const int16_t *weight, const int32_t *bias, - size_t in_kh_step, size_t in_kw_step, size_t channel, size_t in_zp, size_t out_zp, - int32_t *out_multiplier, int32_t *left_shift, int32_t *right_shift, size_t acc_min, - size_t acc_max, size_t per_channel); -#endif -#ifdef __cplusplus -} -#endif - -#endif /* MINDSPORE_LITE_NNACL_FP32_COMMON_FUNC_H_ */ diff --git a/mindspore/lite/nnacl/int8/concat_int8.h b/mindspore/lite/nnacl/int8/concat_int8.h deleted file mode 100644 index 2c2f9d05d5..0000000000 --- a/mindspore/lite/nnacl/int8/concat_int8.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_CONCAT_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_CONCAT_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/concat_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Int8Concat(int8_t **inputs, int8_t *output_ptr, ConcatParameter *para, int axis, int64_t real_dst_count, - int task_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_CONCAT_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/conv1x1_int8.h b/mindspore/lite/nnacl/int8/conv1x1_int8.h deleted file mode 100644 index ec2ef268f5..0000000000 --- a/mindspore/lite/nnacl/int8/conv1x1_int8.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INT8_CONV1X1_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_CONV1X1_INT8_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/pack.h" -#include "nnacl/op_base.h" -#include "nnacl/common_func.h" -#include "nnacl/conv_parameter.h" -#include "nnacl/int8/quantize.h" -#include "nnacl/matmul_parameter.h" -#include "nnacl/int8/matmul_int8.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void Conv1x1Int8(const int8_t *packed_input, const int8_t *packed_weight, int8_t *dst, const int32_t *input_sum, - const int32_t *bias, int row, int col, int deep16, int32_t *left_shift, int32_t *right_shift, - int32_t *multiplier, ConvParameter *conv_param, int32_t *filter_zp); -void Conv1x1Int8Opt(const int8_t *packed_input, const int8_t *packed_weight, int8_t *dst, const int32_t *input_sum, - const int32_t *bias, int row, int col, int deep4, int32_t *left_shift, int32_t *right_shift, - int32_t *multiplier, ConvParameter *conv_param, MATMUL_OPT_DP_FUNC matmul_func, int32_t *filter_zp); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_CONV1X1_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/conv3x3_int8.h b/mindspore/lite/nnacl/int8/conv3x3_int8.h deleted file mode 100644 index 5c1c9818a2..0000000000 --- a/mindspore/lite/nnacl/int8/conv3x3_int8.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ - -#include -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/pack.h" -#include "nnacl/op_base.h" -#include "nnacl/common_func.h" -#include "nnacl/conv_parameter.h" -#include "nnacl/int8/fixed_point.h" -#include "nnacl/int8/quantize.h" -#include "nnacl/matmul_parameter.h" -#include "nnacl/int8/matmul_int8.h" -#include "nnacl/int8/common_func_int8.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void Conv3x3Int8FilterTransform(const int16_t *weight_data, int16_t *trans_weight, int iC8, int output_channel, - int kernel_plane); - -void Conv3x3Int8(int16_t *input_data, int16_t *transed_weight, const int32_t *bias_data, int8_t *output_data, - int16_t *tile_buffer, int16_t *block_unit_buffer, int32_t *tmp_dst_buffer, int8_t *tmp_out, - int task_id, ConvParameter *conv_param); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/conv_depthwise_int8.h b/mindspore/lite/nnacl/int8/conv_depthwise_int8.h deleted file mode 100644 index 9ff845a3e7..0000000000 --- a/mindspore/lite/nnacl/int8/conv_depthwise_int8.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_CONV_DEPTHWISE_H_ -#define MINDSPORE_LITE_NNACL_INT8_CONV_DEPTHWISE_H_ - -#include "nnacl/conv_parameter.h" -#include "nnacl/fp32/conv_depthwise_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void ConvDwInt8(int8_t *output_data, int32_t *output_row, const int8_t *input_data, const int16_t *weight_data, - const int32_t *bias_data, const ConvParameter *conv_param, int task_id); - -void ConvDw3x3Int8Pad(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data, - const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding); - -void ConvDw3x3Int8(int8_t *output_data, int8_t *buffer, const int8_t *input_data, const int16_t *weight_data, - const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding, - int task_id); - -void ConvDwInt8SW(int8_t *output_data, const int8_t *input_data, const int16_t *weight_data, const int32_t *bias_data, - int8_t *input_zp, int32_t *output_zp, const ConvParameter *conv_param, - const SlidingWindowParam *sliding, int task_id); - -void DeconvDwInt8(int8_t *output_data, int32_t *output_buffer, const int16_t *input_data, const int16_t *weight_data, - const int32_t *bias_data, const ConvParameter *conv_param, const SlidingWindowParam *sliding, - int task_id); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_CONV_DEPTHWISE_H_ diff --git a/mindspore/lite/nnacl/int8/conv_int8.h b/mindspore/lite/nnacl/int8/conv_int8.h deleted file mode 100644 index bc0aab94db..0000000000 --- a/mindspore/lite/nnacl/int8/conv_int8.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ - -#include -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/pack.h" -#include "nnacl/op_base.h" -#include "nnacl/common_func.h" -#include "nnacl/conv_parameter.h" -#include "nnacl/int8/quantize.h" -#include "nnacl/matmul_parameter.h" -#include "nnacl/int8/matmul_int8.h" -#include "nnacl/int8/common_func_int8.h" - -#ifdef __cplusplus -extern "C" { -#endif -// int8 conv common -void ConvInt8(int8_t *input_data, int8_t *packed_input, int8_t *matmul_input, int8_t *packed_weight, - const int32_t *bias_data, int8_t *output_data, int32_t *filter_zp, int32_t *input_sum, int task_id, - ConvParameter *conv_param, MATMUL_OPT_R_FUNC matmul_func, bool is_optimize); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_CONV_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/crop_int8.h b/mindspore/lite/nnacl/int8/crop_int8.h deleted file mode 100644 index 77c2adef40..0000000000 --- a/mindspore/lite/nnacl/int8/crop_int8.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_CROP_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_CROP_INT8_H_ -#include "nnacl/op_base.h" -#include "nnacl/crop_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Int8Crop(const int8_t *input, int8_t *output, int task_id, CropParameter *para); -void Int8Crop1D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); -void Int8Crop2D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); -void Int8Crop3D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); -void Int8Crop4D(const int8_t *input, int8_t *output, int task_id, CropParameter *para); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_CROP_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/deconv_int8.h b/mindspore/lite/nnacl/int8/deconv_int8.h deleted file mode 100644 index 339f6cb40e..0000000000 --- a/mindspore/lite/nnacl/int8/deconv_int8.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_INT8_DECONV_H_ -#define MINDSPORE_LITE_NNACL_INT8_DECONV_H_ - -#include -#include "nnacl/pack.h" -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/conv_parameter.h" -#include "nnacl/common_func.h" -#include "nnacl/int8/matmul_int8.h" - -#ifdef __cplusplus -extern "C" { -#endif -void DeConvPackWeightSum(int8_t *weight, int32_t *weight_sum, int32_t input_zp, int32_t filter_zp, int deep16, int col4, - bool suppport_opt); -void DeConvPackInputSum(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16, - bool suppport_opt); -void DeConvWeightTransInt8(int8_t *src, int8_t *dst, int input_channel, int output_channel, int plane, - bool support_optimize_); - -int DeConvInt8(const int8_t *input, const int8_t *weight, int32_t *output, int32_t *weight_sum, int32_t *input_sum, - size_t act_row, size_t act_col, size_t act_deep, ConvParameter *conv_param, - MATMUL_OPT_R4_FUNC matmul_func); -int DeConvPostInt8(const int32_t *src, const int32_t *bias, int32_t *tmp, int8_t *out, int output_channel, - ConvParameter *conv_param, bool support_optimize); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_DECONV_H_ diff --git a/mindspore/lite/nnacl/int8/depth_to_space_int8.h b/mindspore/lite/nnacl/int8/depth_to_space_int8.h deleted file mode 100644 index 5af96940d5..0000000000 --- a/mindspore/lite/nnacl/int8/depth_to_space_int8.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_INT8_DEPTH_TO_SPACE_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_DEPTH_TO_SPACE_INT8_H_ - -#include "nnacl/depth_to_space_parameter.h" -#include "nnacl/int8/quantize.h" - -#ifdef __cplusplus -extern "C" { -#endif -void DepthToSpaceForNHWCInt8(const int8_t *input, int8_t *output, const int *in_shape, DepthToSpaceParameter *param, - QuantArg *in_quant_arg, QuantArg *out_quant_arg); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_DEPTH_TO_SPACE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/div_int8.h b/mindspore/lite/nnacl/int8/div_int8.h deleted file mode 100644 index 8522f7247c..0000000000 --- a/mindspore/lite/nnacl/int8/div_int8.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_DIV_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_DIV_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/int8/quantize.h" -#include "nnacl/int8/fixed_point.h" - -#ifdef __cplusplus -extern "C" { -#endif -int DivInt8(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int64_t real_dst_count, DivQuantArg *para); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_DIV_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/fixed_point.h b/mindspore/lite/nnacl/int8/fixed_point.h deleted file mode 100644 index 92e3981208..0000000000 --- a/mindspore/lite/nnacl/int8/fixed_point.h +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_QUANTIZATION_FIXED_POINT_H_ -#define MINDSPORE_LITE_NNACL_QUANTIZATION_FIXED_POINT_H_ - -#include -#include -#ifdef ENABLE_NEON -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -// returns the high-32 bits of a * b with rounding -// assume that a and b is divided by 2^31, who fall into [-1, 1] -// so the mantissa of a * b is (a / 2^31) * (b / 2^31) * 2^31= (a * b) / 2^31 -// actually we compute 2 * a * b / 2^32 -// and take 32 bits of mantissa for rounding -int SaturatingRoundingDoublingHighMul(int a, int b); - -int16_t SaturatingRoundingDoublingHighMulInt16(int16_t a, int16_t b); - -// division by a 2^exponent with rounding -// or arithmetic right shift with rounding -int RoundingDivideByPOT(int x, int exponent); - -int UpwardRounding(int x, int exponent); - -int MultiplyByQuantizedMultiplier(int32_t value, int32_t multiplier, int32_t left_shift, int32_t right_shift); - -int MultiplyByQuantizedMultiplierWithUpwardRounding(int32_t value, int32_t multiplier, int32_t left_shift, - int32_t right_shift); - -int MultiplyByMultiplierAndRightShift(int32_t value, int32_t multiplier, int32_t right_shift); - -int SaturatingRoundingMultiplyByPOT(int32_t x, int exponent); - -int32_t Rescale(int x, int kIntegerBitsSrc, int kIntegerBitsDst); - -int CountLeadingSignBits(int32_t x); - -int32_t ComputerReciprocal(int32_t x, int x_digits, int *recip_shift); - -int exp_on_negative_values(int a, const int tIntegerBits); - -void GetSqrtQuantMultiplierExp(int32_t input, int reverse_shift, int32_t *multiplier, int32_t *shift); - -#ifdef __cplusplus -} -#endif - -#ifdef ENABLE_NEON -int32x4_t RoundingDivideByPOTInt32x4(int32x4_t x, int exponent); - -int32x4_t SaturatingRoundingDoublingHighMulInt32x4(int32x4_t a, int32x4_t b); -#endif - -#endif // MINDSPORE_LITE_NNACL_QUANTIZATION_FIXED_POINT_H_ diff --git a/mindspore/lite/nnacl/int8/hswish_int8.h b/mindspore/lite/nnacl/int8/hswish_int8.h deleted file mode 100644 index 
fd18fe4371..0000000000 --- a/mindspore/lite/nnacl/int8/hswish_int8.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_INT8_HSWISH_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_HSWISH_INT8_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/int8/fixed_point.h" - -typedef struct HswishQuantArg { - double input_scale; - int32_t input_zp; - double output_scale; - int32_t output_zp; - int16_t relu6_multiplier_fixedpoint_int16; - int32_t relu6_multiplier_exponent; - int16_t output_multiplier_fixedpoint_int16; - int32_t output_multiplier_exponent; -} HswishQuantArg; - -#ifdef __cplusplus -extern "C" { -#endif -int HSwishInt8(const int8_t *src, int length, int8_t *dst, HswishQuantArg *arg); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_HSWISH_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/l2_norm_int8.h b/mindspore/lite/nnacl/int8/l2_norm_int8.h deleted file mode 100644 index b26fa8bf76..0000000000 --- a/mindspore/lite/nnacl/int8/l2_norm_int8.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_INT8_L2_NORM_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_L2_NORM_INT8_H_ - -#include "nnacl/l2_norm_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int L2NormalizationInt8(const int8_t *input_data, int8_t *output_data, const L2NormParameter *param, - const L2NormQuantArg *quant_param, const int begin, const int end); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_L2_NORM_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/layer_norm_int8.h b/mindspore/lite/nnacl/int8/layer_norm_int8.h deleted file mode 100644 index 3e23478ffa..0000000000 --- a/mindspore/lite/nnacl/int8/layer_norm_int8.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INT8_LAYER_NORM_H_ -#define MINDSPORE_LITE_NNACL_INT8_LAYER_NORM_H_ - -#include "nnacl/errorcode.h" -#include "nnacl/layer_norm_parameter.h" -#include "nnacl/int8/fixed_point.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int LayerNormInt8(const int8_t *src_data, const float *gamma_data, const float *beta_data, int8_t *dst_data, - LayerNormParameter *param, LayerNormQuantArg *quant, int task_id); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_LAYER_NORM_H_ diff --git a/mindspore/lite/nnacl/int8/matmul_int8.h b/mindspore/lite/nnacl/int8/matmul_int8.h deleted file mode 100644 index 2389e98ed9..0000000000 --- a/mindspore/lite/nnacl/int8/matmul_int8.h +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_MATMUL_H_ -#define MINDSPORE_LITE_NNACL_INT8_MATMUL_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/matmul_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -/* 4x16 16x4 -> 4x4 */ -/* matmul */ -void MatMulInt8_16x4(const int8_t *a, const int8_t *b, int *dst, int row_4, int col_4, int deep_16, - const int *input_sum, const int *bias); -void RowMajor2Row16x4MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col); -void RowMajor2Col16x4MajorInt8(int8_t *src, int row, int col, int8_t *dst); -void CalcInputSums(int8_t *input, int row, int col, int weight_zp, int *dst, DataOrder order); -void CalcWeightBiasSums(int8_t *weight, int row, int col, int input_zp, int *weight_zp_ptr, const int *bias, int *dst, - DataOrder order, bool filter_per_channel); -void MatmulInt8Opt(const int8_t *a, const int8_t *b, int8_t *dst, int row, int col, int deep16, const int *a_sums, - const int *bias, int act_min, int act_max, int out_zp, int32_t *multiplier, int32_t *left_shift, - int32_t *right_shift, size_t stride, size_t filter_peroc, int32_t *filter_zp); - -/* 8x4 4x8 -> 8x8 */ -/* optimize conv */ -void RowMajor2Row8x4MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col); -void MatMulInt8_8x8_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, - size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, - int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi, - size_t per_channel); - -/* 4x16 16x2 -> 4x2 */ -/* arm32 conv1x1 */ -void RowMajor2Row2x16MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col); -void RowMajor2Col16x2MajorInt8(int8_t *src_ptr, int8_t *dst_ptr, int row, int col); -void MatMulInt8_4x2_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_16, - size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, - int32_t *right_shift, 
int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi, - bool peroc); - -/* 4x4 4x16 -> 4x16 */ -/* optimize conv1x1 */ -void RowMajor2Row4x16MajorInt8(const int8_t *src_ptr, int8_t *dst_ptr, int row, int col); -void PackInput4x4AndInputSumPert(const int8_t *src_input, int8_t *packed_input, int32_t *input_sum, - size_t input_channel, size_t plane_size, int32_t filter_zp); -void MatMulInt8_4x16_r(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, - size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, - int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, int32_t maxi, - size_t per_channel, int32_t *filter_zp); - -#ifdef ENABLE_ARM64 -void MatmulInt8Neon64(const int8_t *a, const int8_t *b, int8_t *dst, int row4, int col4, int deep16, const int *a_sums, - const int *bias, int act_min, int act_max, int out_zp, int32_t *multiplier, int32_t *left_shift, - int32_t *right_shift, int row, int col, int stride, int filter_peroc); - -void MatMulR4Int8Neon64(const int8_t *a, const int8_t *b, int32_t *dst, int row4, int col4, int deep16, - const int *input_sum, const int *bias); -#endif -#ifdef ENABLE_ARM32 -void MatmulInt8Neon32(const int8_t *a, const int8_t *b, int8_t *dst, int row, int col, int deep16, - const int *input_sums, const int *weight_bias, int act_min, int act_max, int out_zp, - int *multiplier, int *left_shift, int *right_shift, int stride, int per_channel); -#endif -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_SRC_BACKEND_ARM_NNACL_INT8_MATMUL_H_ diff --git a/mindspore/lite/nnacl/int8/mul_int8.h b/mindspore/lite/nnacl/int8/mul_int8.h deleted file mode 100644 index 61ccfd8918..0000000000 --- a/mindspore/lite/nnacl/int8/mul_int8.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_MUL_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_MUL_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/mul_parameter.h" -#include "nnacl/int8/common_func_int8.h" -#include "nnacl/int8/fixed_point.h" -#ifdef ENABLE_NEON -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif -void Mul(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int64_t real_dst_count, MulQuantArg para); -void FastMul(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int depth, int64_t real_dst_count, - bool input1_broad, MulQuantArg para); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_MUL_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/pack_int8.h b/mindspore/lite/nnacl/int8/pack_int8.h deleted file mode 100644 index ea51ac89ad..0000000000 --- a/mindspore/lite/nnacl/int8/pack_int8.h +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_PACK_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_PACK_INT8_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/int8/matmul_int8.h" -#include "nnacl/conv_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void PackNHWCToNHWC4Int8(const void *src, void *dst, int batch, int plane, int channel); -void PackNHWC4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); -void PackNHWCToNHWC8Int8(const void *src, void *dst, int batch, int plane, int channel); -void PackNHWC8ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); -void PackNCHWToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel); -void PackNC4HW4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); -void PackNHWCToC8HWN8Int8(const void *src, void *dst, int batch, int plane, int channel); -void PackNCHWToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel); -void PackNHWCToNCHWInt8(const void *src, void *dst, int batch, int plane, int channel); - -void PackInputSum16x4Int8(const int8_t *input, int32_t *input_sum, int32_t *filter_zp, ConvParameter *conv_param); -void PackInputSum16x4PerLayer(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16); -void PackInputToC8Int8(const int8_t *input_data, int16_t *packed_input, ConvParameter *conv_param); -void PackWeightToC8Int8(const int8_t *origin_weight_data, int16_t *packed_weight_data, ConvParameter *conv_param); -void Im2ColPackUnitInt8Opt(const int8_t *input_data, int8_t *packed_input, int8_t *matmul_input, int real_cal_num, - int block_index, int32_t *filter_zp, int32_t *input_sum, ConvParameter *conv_param, - bool per_channel, bool is_optimize); -#ifdef ENABLE_ARM -void PreSum4x16Int8Pert(const int8_t *src, int32_t *sum, size_t row4, size_t col16, int32_t filter_zp); -void PreSum4x16Int8Peroc(const int8_t *src, int32_t *sum, int32_t *zp, size_t hw4, size_t ic16, int32_t oc_div, - 
size_t oc_res, size_t stride); -#endif - -void PackDepthwiseInt8Input(const int8_t *src, int16_t *dst, const ConvParameter *conv_param); -void PackDepthwiseInt8Weight(const int8_t *origin_weight, int16_t *packed_weight_, int plane, int channel, - ConvQuantArg *quant_qrg); -void PackDeconvDepthwiseInt8Weight(const int8_t *origin_weight, int16_t *packed_weight_, int plane, int channel, - ConvQuantArg *quant_qrg); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_PAD_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/pad_int8.h b/mindspore/lite/nnacl/int8/pad_int8.h deleted file mode 100644 index 41da165558..0000000000 --- a/mindspore/lite/nnacl/int8/pad_int8.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_PAD_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_PAD_INT8_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/pad_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int PadConstant4D(const int8_t *in_data, int8_t *out_data, const int32_t *in_dims, const int32_t *out_dims, - const int32_t *paddings, const int tid, const int thread_num); -void MirrorPadInt8(const int8_t *input_data, int8_t *output_data, const int *input_shape, const PadParameter *pad_param, - int begin, int end); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_PAD_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/pooling_int8.h b/mindspore/lite/nnacl/int8/pooling_int8.h deleted file mode 100644 index 623aa05dbd..0000000000 --- a/mindspore/lite/nnacl/int8/pooling_int8.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_POOLING_H_ -#define MINDSPORE_LITE_NNACL_INT8_POOLING_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include "nnacl/op_base.h" -#include "nnacl/fp32/pooling_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif -#define MAX_MAXPOOL_SIZE 256 - -int AvgPoolingInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); - -int AvgPoolingOptInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); - -void MaxPoolingInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); - -void MaxPoolingWithQuantInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); - -void MaxPoolingOptInt8(const int8_t *input_ptr, int8_t *output_ptr, PoolingParameter *pooling_param, int task_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_POOLING_H_ diff --git a/mindspore/lite/nnacl/int8/power_int8.h b/mindspore/lite/nnacl/int8/power_int8.h deleted file mode 100644 index 86fe650980..0000000000 --- a/mindspore/lite/nnacl/int8/power_int8.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_POWER_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_POWER_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/power_parameter.h" -#include "nnacl/int8/quantize.h" - -#ifdef __cplusplus -extern "C" { -#endif -int PowerInt8(const int8_t *input_ptr, int8_t *exp_ptr, int8_t *output_ptr, int count, PowerParameter *parameter); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_POWER_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/quant_dtype_cast_int8.h b/mindspore/lite/nnacl/int8/quant_dtype_cast_int8.h deleted file mode 100644 index cc61782c6b..0000000000 --- a/mindspore/lite/nnacl/int8/quant_dtype_cast_int8.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_QUANTDTYPECAST_H_ -#define MINDSPORE_LITE_NNACL_INT8_QUANTDTYPECAST_H_ - -#include "nnacl/op_base.h" - -typedef struct QuantDTypeCastParameter { - OpParameter op_parameter_; - int32_t srcT; - int32_t dstT; -} QuantDTypeCastParameter; - -#ifdef __cplusplus -extern "C" { -#endif -int DoDequantizeInt8ToFp32(const int8_t *quant_values, float *real_values, float scale, int32_t zp, int size); -int DoQuantizeFp32ToInt8(const float *real_values, int8_t *quant_values, float scale, int32_t zp, int size, - bool uint8_flag); -int DoDequantizeUInt8ToFp32(const uint8_t *quant_values, float *real_values, float scale, int32_t zp, int size); -int DoQuantizeFp32ToUInt8(const float *real_values, uint8_t *quant_values, float scale, int32_t zp, int size); -int Int8ToUInt8(const int8_t *quant_values, uint8_t *real_values, int size); -int UInt8ToInt8(const uint8_t *real_values, int8_t *quant_values, int size); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_QUANTDTYPECAST_H_ diff --git a/mindspore/lite/nnacl/int8/quantize.h b/mindspore/lite/nnacl/int8/quantize.h deleted file mode 100644 index 06a4b5fa55..0000000000 --- a/mindspore/lite/nnacl/int8/quantize.h +++ /dev/null @@ -1,219 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_QUANTIZATION_QUANTIZE_H_ -#define MINDSPORE_LITE_NNACL_QUANTIZATION_QUANTIZE_H_ - -#include -#include -#include "nnacl/op_base.h" - -#define INPUT_PER_CHANNEL 0b001 -#define FILTER_PER_CHANNEL 0b010 -#define OUTPUT_PER_CHANNEL 0b100 - -typedef struct ConvQuantArg { - RoundingMode round_mode_; - CalFixedMultiplierMode quant_multiplier_mode_; - QuantArg *input_quant_args_; - QuantArg *filter_quant_args_; - QuantArg *output_quant_args_; - double *real_multiplier_; - int32_t *left_shift_; - int32_t *right_shift_; - int32_t *quant_multiplier_; - int32_t *out_act_min_; - int32_t *out_act_max_; - size_t input_arg_num_; - size_t filter_arg_num_; - size_t output_arg_num_; - uint8_t per_channel_; -} ConvQuantArg; - -typedef struct ConcatQuantArg { - QuantArg *in_args_; - QuantArg out_args_; - int8_t output_activation_min_; - int8_t output_activation_max_; -} ConcatQuantArg; - -typedef struct PreluQuantArg { - int *input_sizes_; - int output_size_; - int **input_shapes_; - int *output_shape_; - size_t input_num_; - size_t output_dim_; - float alpha_; - QuantArg in_args_; - QuantArg out_args_; - int output_activation_min_; - int output_activation_max_; - QuantArg *in_quant_args_; - QuantArg out_quant_args_; -} PreluQuantArg; - -typedef struct CropQuantArg { - QuantArg in_args_; - QuantArg out_args_; - int output_activation_min_; - int output_activation_max_; -} CropQuantArg; - -typedef struct ArithSelfQuantArg { - QuantArg in_args_; - QuantArg out_args_; - int output_activation_min_; - int output_activation_max_; - int output_multiplier_; - int shift_left_; - int shift_right_; -} ArithSelfQuantArg; - -typedef struct GatherQuantArg { - double alpha_; - int zp_in_; - int zp_out_; -} GatherQuantArg; - -typedef struct SoftmaxQuantArg { - QuantArg in_quant_args_; - QuantArg out_quant_arg_; - int output_activation_min_; - int output_activation_max_; - int output_multiplier_; - int shift_left_; - int shift_right_; -} SoftmaxQuantArg; - -typedef 
struct SubQuantArg { - QuantArg in0_args_; - QuantArg in1_args_; - QuantArg out_args_; - int output_activation_min_; - int output_activation_max_; - int input0_multiplier_; - int input1_multiplier_; - int output_multiplier_; - int input0_shift_; - int input1_shift_; - int output_shift_; - int left_shift_result0_; - int left_shift_result1_; - int right_shift0_; - int right_shift1_; - int left_shift_out_; - int right_shift_out_; -} SubQuantArg; - -typedef struct ArithmeticQuantArg { - QuantArg in0_args_; - QuantArg in1_args_; - QuantArg out_args_; -} ArithmeticQuantArg; - -typedef struct DivQuantArg { - QuantArg in0_args_; - QuantArg in1_args_; - QuantArg out_args_; - int output_activation_min_; - int output_activation_max_; - int output_multiplier_; - int output_shift_; -} DivQuantArg; - -typedef struct ReduceQuantArg { - double in_scale_; - int32_t in_zp_; - double out_scale_; - int32_t out_zp_; - int32_t in_out_multiplier_; - int in_out_left_shift_; - int in_out_right_shift_; - int32_t mean_multiplier_; - int mean_left_shift_; - int mean_right_shift_; - int32_t prod_multiplier_; - int prod_left_shift_; - int prod_right_shift_; - int32_t sum_square_multiplier_; - int sum_square_left_shift_; - int sum_square_right_shift_; -} ReduceQuantArg; - -typedef struct LeakyReluQuantArg { - OpParameter op_parameter_; - PreluQuantArg quant_arg; - float slope_; - int64_t axis_; - int *in_shape_; - int *out_shape_; - int input_dim_; - int element_num; -} LeakyReluQuantArg; - -typedef struct ResizeQuantArg { - int32_t ratio_x_; - int32_t ratio_y_; - int32_t *x_axis_index_; - int32_t *x_axis_lower_; - int32_t *x_axis_upper_; - int32_t *y_axis_index_; - int32_t *y_axis_lower_; - int32_t *y_axis_upper_; -} ResizeQuantArg; - -typedef struct ResizeFloatScaleQuantArg { - float ratio_x_; - float ratio_y_; - float *x_axis_index_; - int32_t *x_axis_lower_; - int32_t *x_axis_upper_; - float *y_axis_index_; - int32_t *y_axis_lower_; - int32_t *y_axis_upper_; -} ResizeFloatScaleQuantArg; - 
-#ifdef __cplusplus -extern "C" { -#endif - -void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift); - -void QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t *quantized_multiplier, int *right_shift); - -void QuantizeRoundParameterWithDoublePrecision(double double_multiplier, int32_t *quantized_multiplier, int *left_shift, - int *right_shift); - -void QuantizeRoundParameterWithSinglePrecision(double double_multiplier, int32_t *quantized_multiplier, int *left_shift, - int *right_shift); - -uint8_t QuantizeToUint8(float real_value, float scale, int32_t zp); - -int32_t QuantizeToInt8(float real_value, float scale, int32_t zp); - -void CalculateActivationRangeQuantized(bool is_relu, bool is_relu6, int32_t zp, float scale, int *mini, int *maxi); -// quantize from float to int8 -void Quantize(const float *input_data, int length, float scale, int zero_point, int8_t *output_data); - -// dequantize from int8 to float -void Dequantize(int8_t *input_data, int length, float scale, int zero_point, float *output_data); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_QUANTIZATION_QUANTIZE_H_ diff --git a/mindspore/lite/nnacl/int8/reduce_int8.h b/mindspore/lite/nnacl/int8/reduce_int8.h deleted file mode 100644 index 44d845f5ff..0000000000 --- a/mindspore/lite/nnacl/int8/reduce_int8.h +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_REDUCE_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_REDUCE_INT8_H_ - -#include "nnacl/int8/quantize.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int ReduceMeanN(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanH(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanW(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanNH(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanNW(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanNC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanHW(int n, int plane, int count, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg, - int32_t bias); -int ReduceMeanHC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanNHW(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanNHC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanNWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanHWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); -int ReduceMeanNHWC(int n, int h, int w, int c, int8_t *in_data, int8_t *out_data, QuantMulArg quant_arg); - -int ReduceMeanInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t 
*src_data, - int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceMeanLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceSumInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceSumLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceMaxInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceMaxLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceMinInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceMinLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceProdLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceProdInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceSumSquareLastAxis(const int outer_size, const int inner_size, const int axis_size, const int32_t 
*src_data, - int8_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -int ReduceSumSquareInt8(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, - int32_t *dst_data, const ReduceQuantArg *quant, const int tid, const int thread_num); -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INT8_REDUCE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/relux_int8.h b/mindspore/lite/nnacl/int8/relux_int8.h deleted file mode 100644 index 78b78596e8..0000000000 --- a/mindspore/lite/nnacl/int8/relux_int8.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INT8_RELU_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_RELU_INT8_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/errorcode.h" -#include "nnacl/int8/fixed_point.h" -#include "nnacl/int8/quantize.h" - -typedef struct ReluXQuantArg { - QuantArg input_arg; - QuantArg output_arg; - int input_multiplier_; - int left_shift_; - int right_shift_; - int quantized_output_min; - int quantized_output_max; -} ReluXQuantArg; - -#ifdef __cplusplus -extern "C" { -#endif -void ReluXInt8(const int8_t *src, int length, int8_t *dst, ReluXQuantArg *arg); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_RELU_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/reshape_int8.h b/mindspore/lite/nnacl/int8/reshape_int8.h deleted file mode 100644 index 5e88d85988..0000000000 --- a/mindspore/lite/nnacl/int8/reshape_int8.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_RESHAHPE_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_RESHAHPE_INT8_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/reshape_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -void Int8Reshape(int8_t *input_ptr, int8_t *output_ptr, int64_t real_dst_count, ReshapeQuantArg para); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_RESHAHPE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/resize_int8.h b/mindspore/lite/nnacl/int8/resize_int8.h deleted file mode 100644 index 49a328262a..0000000000 --- a/mindspore/lite/nnacl/int8/resize_int8.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INT8_RESIZE_H_ -#define MINDSPORE_LITE_NNACL_INT8_RESIZE_H_ - -#ifdef ENABLE_NEON -#include -#endif -#include -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" -#include "nnacl/resize_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int ResizeBilinearInt8(const int8_t *input_ptr, int8_t *output_ptr, int batch, int in_h, int in_w, int out_h, int out_w, - int channel, int index, int count, ResizeQuantArg quant_arg); - -int ResizeBilinearWithFloatScaleInt8(const int8_t *input_ptr, int8_t *output_ptr, int batch, int in_h, int in_w, - int out_h, int out_w, int channel, int index, int count, - ResizeFloatScaleQuantArg quant_arg); - -int ResizeNearestNeighborInt8Simple(const int8_t *input_data, int8_t *output_data, const int *input_shape, - const int *output_shape, const bool align_corners, int tid, int thread_num); - -int ResizeNearestNeighborInt8(const int8_t *input_data, int8_t *output_data, const int *input_shape, - const int *output_shape, const bool align_corners, const QuantMulArg *multiplier, - QuantArg *quant_in, QuantArg *quant_out, int tid, int thread_num); - -void ComputeNearestNeighborInt(const int32_t pos, const int in_size, const int32_t new_size, const bool align_corners, - int32_t *nearest); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_RESIZE_H_ diff --git a/mindspore/lite/nnacl/int8/scale_int8.h b/mindspore/lite/nnacl/int8/scale_int8.h deleted file mode 100644 index 993e5b808c..0000000000 --- a/mindspore/lite/nnacl/int8/scale_int8.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_SCALE_INT8_H_ -#define MINDSPORE_LITE_NNACL_SCALE_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/scale.h" -#include "nnacl/nnacl_common.h" - -#ifdef __cplusplus -extern "C" { -#endif -void DoScaleInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, const ScaleParameter *scale_param, - int real_dst_count); -void DoScaleWithBiasInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, const int8_t *offset, - const ScaleParameter *scale_param, int real_dst_count); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_SCALE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/slice_int8.h b/mindspore/lite/nnacl/int8/slice_int8.h deleted file mode 100644 index 70ac1fbd8c..0000000000 --- a/mindspore/lite/nnacl/int8/slice_int8.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INT8_SLICE_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_SLICE_INT8_H_ - -#include -#include -#include "nnacl/op_base.h" -#include "nnacl/slice_parameter.h" -#include "nnacl/int8/fixed_point.h" - -#ifdef __cplusplus -extern "C" { -#endif -int SliceInt8NoParallel(const int8_t *input, int8_t *output, SliceParameter *param); -int SliceInt8(const int8_t *input, int8_t *output, SliceParameter *param, int thread_id); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_SLICE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/softmax_int8.h b/mindspore/lite/nnacl/int8/softmax_int8.h deleted file mode 100644 index 83c8aa7ca0..0000000000 --- a/mindspore/lite/nnacl/int8/softmax_int8.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_SOFTMAX_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_SOFTMAX_INT8_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/softmax_parameter.h" -#include "nnacl/int8/fixed_point.h" -#include "nnacl/int8/quantize.h" - -#ifdef __cplusplus -extern "C" { -#endif -int SoftmaxInt8(const int8_t *input_ptr, int8_t *output_ptr, int count, int *exp_data, int *sum_data, - SoftmaxQuantArg quant_param, SoftmaxParameter *parameter); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_SOFTMAX_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/space_to_batch_int8.h b/mindspore/lite/nnacl/int8/space_to_batch_int8.h deleted file mode 100644 index a8e6df17a8..0000000000 --- a/mindspore/lite/nnacl/int8/space_to_batch_int8.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INT8_SPACE_TO_BATCH_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_SPACE_TO_BATCH_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/fp32/space_to_batch_fp32.h" - -#ifdef __cplusplus -extern "C" { -#endif -void DoSpaceToBatchNHWCInt8(const int8_t *input, int8_t *output, const int *block_sizes, const int *in_shape, - const int *out_shape); -void DoSpaceToBatchPaddingNHWCInt8(const int8_t *input, int8_t *output, SpaceToBatchParameter *param, int32_t zp); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_SPACE_TO_BATCH_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/splice_int8.h b/mindspore/lite/nnacl/int8/splice_int8.h deleted file mode 100644 index 326aea4a0d..0000000000 --- a/mindspore/lite/nnacl/int8/splice_int8.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_INT8_SPLICE_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_SPLICE_INT8_H_ -#include -#include "nnacl/splice_parameter.h" -#ifdef __cplusplus -extern "C" { -#endif - -void SpliceInt8(const int8_t *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter, - int8_t *dst_data, int dst_row, int dst_col); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_INT8_SPLICE_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/split_int8.h b/mindspore/lite/nnacl/int8/split_int8.h deleted file mode 100644 index 065674c56a..0000000000 --- a/mindspore/lite/nnacl/int8/split_int8.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_SPLIT_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_SPLIT_INT8_H_ - -#include -#include "nnacl/op_base.h" -#include "nnacl/split_parameter.h" - -#ifdef __cplusplus -extern "C" { -#endif -int Int8DoSplit(int8_t *in_data, int8_t **out_data, const int *input_shape, int offset, int num_unit, - SplitParameter *split_param); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_SPLIT_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/sub_int8.h b/mindspore/lite/nnacl/int8/sub_int8.h deleted file mode 100644 index 6764072e70..0000000000 --- a/mindspore/lite/nnacl/int8/sub_int8.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_SUB_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_SUB_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" - -#ifdef __cplusplus -extern "C" { -#endif -int SubInt8(int8_t *input0_data, int8_t *input1_data, int8_t *output_data, int64_t real_dst_count, SubQuantArg *para); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_SUB_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/tanh_int8.h b/mindspore/lite/nnacl/int8/tanh_int8.h deleted file mode 100644 index 1ad2cbecf4..0000000000 --- a/mindspore/lite/nnacl/int8/tanh_int8.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_TANH_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_TANH_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" -#include "nnacl/int8/fixed_point.h" -#include "nnacl/int8/quant_dtype_cast_int8.h" -#include "nnacl/fp32/activation_fp32.h" - -typedef struct TanhQuantParameter { - int32_t in_zp_; - int32_t out_zp_; - double in_scale_; - double out_scale_; -} TanhQuantParameter; - -#ifdef __cplusplus -extern "C" { -#endif - -void TanhInt8(const int8_t *input_ptr, int8_t *output_ptr, int size, TanhQuantParameter *quant); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_TANH_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/topk_int8.h b/mindspore/lite/nnacl/int8/topk_int8.h deleted file mode 100644 index 6dfb76725c..0000000000 --- a/mindspore/lite/nnacl/int8/topk_int8.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_TOPK_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_TOPK_INT8_H_ - -#include "nnacl/op_base.h" -#include "nnacl/fp32/topk_fp32.h" - -typedef struct TopkNodeInt8 { - int8_t element; - int32_t index; -} TopkNodeInt8; - -#ifdef __cplusplus -extern "C" { -#endif -void TopkInt8(int8_t *input_data, int8_t *output_data, int32_t *output_index, TopkParameter *parameter); -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_TOPK_INT8_H_ diff --git a/mindspore/lite/nnacl/int8/transpose_int8.h b/mindspore/lite/nnacl/int8/transpose_int8.h deleted file mode 100644 index c90c485bc1..0000000000 --- a/mindspore/lite/nnacl/int8/transpose_int8.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INT8_TRANSPOSE_INT8_H_ -#define MINDSPORE_LITE_NNACL_INT8_TRANSPOSE_INT8_H_ - -#include -#include "nnacl/transpose.h" -#include "nnacl/errorcode.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int DoTransposeInt8(const int8_t *in_data, int8_t *out_data, const int *output_shape, - TransposeParameter *transpose_param, int h_start, int h_end, int *dim_size, int *position); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INT8_TRANSPOSE_INT8_H_ diff --git a/mindspore/lite/nnacl/intrinsics/avx/common_utils.h b/mindspore/lite/nnacl/intrinsics/avx/common_utils.h deleted file mode 100644 index 14589405dc..0000000000 --- a/mindspore/lite/nnacl/intrinsics/avx/common_utils.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_X86_64_AVX_COMMON_UTILS_H_ -#define MINDSPORE_LITE_NNACL_X86_64_AVX_COMMON_UTILS_H_ - -#include - -#ifdef __cplusplus -extern "C" { -#endif -#ifdef __GNUC__ -#if __GNUC__ < 8 -#define _mm256_set_m128i(xmm1, xmm2) \ - _mm256_permute2f128_si256(_mm256_castsi128_si256(xmm1), _mm256_castsi128_si256(xmm2), 2) -#define _mm256_set_m128f(xmm1, xmm2) \ - _mm256_permute2f128_ps(_mm256_castps128_ps256(xmm1), _mm256_castps128_ps256(xmm2), 2) -#endif -#endif - -// Signed saturating Add -__m128i _mm_adds_epi32(__m128i a, __m128i b); - -// Signed rounding shift right -__m128i _mm_rshr_epi32(__m128i a, int shift); - -// Signed saturating Rounding Doubling Multiply return High half -__m128i _mm_qrdmulh_epi32(__m128i a, __m128i b); -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_X86_64_AVX_COMMON_UTILS_H_ diff --git a/mindspore/lite/nnacl/intrinsics/ms_simd_instructions.h b/mindspore/lite/nnacl/intrinsics/ms_simd_instructions.h deleted file mode 100644 index ad73a8ebf6..0000000000 --- a/mindspore/lite/nnacl/intrinsics/ms_simd_instructions.h +++ /dev/null @@ -1,229 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_INTRINSICS_MS_SIMD_INSTRUCTIONS_H_ -#define MINDSPORE_LITE_NNACL_INTRINSICS_MS_SIMD_INSTRUCTIONS_H_ -#include -#ifdef ENABLE_ARM -#include -#endif -#if defined(ENABLE_SSE) || defined(ENABLE_AVX) -#include -#endif - -#ifdef ENABLE_ARM -#define MS_FLOAT32X4 float32x4_t -#define MS_INT32X4 int32x4_t -#define MS_LDQ_F32 vld1q_f32 -#define MS_LDQ_EPI32 vld1q_s32 -#define MS_ADDQ_F32 vaddq_f32 -#define MS_ADDQ_EPI32 vaddq_s32 -#define MS_MOVQ_F32 vmovq_n_f32 -#define MS_MOVQ_EPI32 vmovq_n_s32 -#define MS_SUBQ_F32 vsubq_f32 -#define MS_MLAQ_F32(src1, src2, src3) vmlaq_f32(src1, src2, src3) -#define MS_STQ_F32 vst1q_f32 -#define MS_STQ_EPI32 vst1q_s32 -#define MS_MAXQ_F32 vmaxq_f32 -#define MS_MAXQ_EPI32 vmaxq_s32 -#define MS_MINQ_F32 vminq_f32 -#define MS_MINQ_EPI32 vminq_s32 -#define MS_MULQ_F32(src1, src2) vmulq_f32(src1, src2) -#define MS_MULQ_EPI32(src1, src2) vmulq_s32(src1, src2) -#ifdef ENABLE_ARM64 -#define MS_DIVQ_F32(src1, src2) vdivq_f32(src1, src2) -#else -inline static float32x4_t vrecp(float32x4_t v) { - float32x4_t r = vrecpeq_f32(v); - r = vmulq_f32(vrecpsq_f32(v, r), r); - r = vmulq_f32(vrecpsq_f32(v, r), r); - return r; -} -#define MS_DIVQ_F32(src1, src2) vmulq_f32(src1, vrecp(src2)) -#endif -#define MS_MULQ_N_F32(src1, src2) vmulq_n_f32(src1, src2) -#define MS_MULQ_N_EPI32(src1, src2) vmulq_n_s32(src1, src2) -#define MS_DIVQ_N_F32(src1, src2) vdivq_n_f32(src1, src2) -#define MS_SLLIQ_EPI32(src1, src2) vshlq_s32(src1, vmovq_n_s32(src2)) -#define MS_CVTQPS_EPI32(src) vcvtq_s32_f32(src) -#define MS_CVTQEPI32_PS(src) vcvtq_f32_s32(src) -#define MS_CMPGTQ_F32(src1, src2) vcgtq_f32(src1, src2) -#define MS_CMPGTQ_EPI32(src1, src2) vcgtq_s32(src1, src2) -// Note: Compared with X86, the vbslq_f32 parameters are the opposite with _mm_blendv_f32 -#define MS_BLENDQ_F32(src1, src2, src3) vbslq_f32(src3, src2, src1) -#define MS_BLENDQ_EPI32(src1, src2, src3) vbslq_s32(src3, src2, src1) -#define MS_CAST_F32_S32(src) 
vreinterpretq_f32_s32(src) -#endif - -#if defined(ENABLE_AVX) -#define MS_FLOAT32X8 __m256 -#define MS_INT32X8 __m256i -#define MS_LD256_F32 _mm256_loadu_ps -#define MS_LD256_EPI32(src) _mm256_loadu_si256((__m256i const *)(src)) -#define MS_ADD256_F32 _mm256_add_ps -#define MS_ADD256_EPI32 _mm256_add_epi32 -#define MS_MOV256_F32 _mm256_set1_ps -#define MS_MOV256_EPI32 _mm256_set1_epi32 -#define MS_MLA256_F32(src1, src2, src3) _mm256_add_ps(src1, _mm256_mul_ps(src2, src3)) -#define MS_ST256_F32 _mm256_storeu_ps -#define MS_ST256_EPI32(src1, src2) _mm256_storeu_si256((__m256i *)(src1), src2) -#define MS_SUB256_F32 _mm256_sub_ps -#define MS_MAX256_F32 _mm256_max_ps -#define MS_MAX256_EPI32 _mm256_max_epi32 -#define MS_MIN256_F32 _mm256_min_ps -#define MS_MIN256_EPI32 _mm256_min_epi32 -#define MS_MUL256_F32(src1, src2) _mm256_mul_ps(src1, src2) -#define MS_MUL256_EPI32(src1, src2) _mm256_mul_epi32(src1, src2) -#define MS_DIV256_F32(src1, src2) _mm256_div_ps(src1, src2) -#define MS_MUL256_N_F32(src1, src2) _mm256_mul_ps(src1, _mm256_set1_ps(src2)) -#define MS_MUL256_N_EPI32(src1, src2) _mm256_mul_epi32(src1, _mm256_set1_epi32(src2)) -#define MS_DIV256_N_F32(src1, src2) _mm256_div_ps(src1, _mm256_set1_ps(src2)) -#define MS_SLLI256_EPI32(src1, src2) _mm256_slli_epi32(src1, src2) -#define MS_CVT256PS_EPI32(src) _mm256_cvttps_epi32(src) -#define MS_CVT256EPI32_PS(src) _mm256_cvtepi32_ps(src) // truncate float to int -#define MS_CMP256_F32(src1, src2, src3) _mm256_cmp_ps(src1, src2, src3) -#define MS_CMPGT256_EPI32(src1, src2) _mm256_cmpgt_epi32(src1, src2) -#define MS_BLEND256_F32(src1, src2, src3) _mm256_blendv_ps(src1, src2, src3) -#define MS_BLEND256_EPI32(src1, src2, src3) _mm256_blendv_epi8(src1, src2, src3) -#define MS_CAST256_F32_S32(src) _mm256_castsi256_ps(src) -#endif - -#if defined(ENABLE_SSE) -#define MS_FLOAT32X4 __m128 -#define MS_INT32X4 __m128i -#define MS_LDQ_F32 _mm_loadu_ps -#define MS_LDQ_EPI32(src) _mm_loadu_si128((__m128i const *)(src)) -#define 
MS_ADDQ_F32 _mm_add_ps -#define MS_ADDQ_EPI32 _mm_add_epi32 -#define MS_MOVQ_F32 _mm_set1_ps -#define MS_MOVQ_EPI32 _mm_set1_epi32 -#define MS_MLAQ_F32(src1, src2, src3) _mm_add_ps(src1, _mm_mul_ps(src2, src3)) -#define MS_STQ_F32 _mm_storeu_ps -#define MS_STQ_EPI32(src1, src2) _mm_storeu_si128((__m128i *)(src1), src2) -#define MS_SUBQ_F32 _mm_sub_ps -#define MS_MAXQ_F32 _mm_max_ps -#define MS_MAXQ_EPI32 _mm_max_epi32 -#define MS_MINQ_F32 _mm_min_ps -#define MS_MINQ_EPI32 _mm_min_epi32 -#define MS_MULQ_F32(src1, src2) _mm_mul_ps(src1, src2) -#define MS_MULQ_EPI32(src1, src2) _mm_mul_epi32(src1, src2) -#define MS_DIVQ_F32(src1, src2) _mm_div_ps(src1, src2) -#define MS_MULQ_N_F32(src1, src2) _mm_mul_ps(src1, _mm_set1_ps(src2)) -#define MS_MULQ_N_EPI32(src1, src2) _mm_mul_epi32(src1, _mm_set1_epi32(src2)) -#define MS_DIVQ_N_F32(src1, src2) _mm_div_ps(src1, _mm_set1_ps(src2)) -#define MS_SLLIQ_EPI32(src1, src2) _mm_slli_epi32(src1, src2) -#define MS_CVTQPS_EPI32(src) _mm_cvttps_epi32(src) // truncate float to int -#define MS_CVTQEPI32_PS(src) _mm_cvtepi32_ps(src) -#define MS_CMPGTQ_F32(src1, src2) _mm_cmpgt_ps(src1, src2) -#define MS_CMPGTQ_EPI32(src1, src2) _mm_cmpgt_epi32(src1, src2) -#define MS_BLENDQ_F32(src1, src2, src3) _mm_blendv_ps(src1, src2, src3) -#define MS_BLENDQ_EPI32(src1, src2, src3) _mm_blendv_epi8(src1, src2, src3) -#define MS_CAST_F32_S32(src) _mm_castsi128_ps(src) -#endif - -#define LOAD256X8_F32(src, input_ptr, num) \ - MS_FLOAT32X8 src##1 = MS_LD256_F32(input_ptr + 0 * num); \ - MS_FLOAT32X8 src##2 = MS_LD256_F32(input_ptr + 1 * num); \ - MS_FLOAT32X8 src##3 = MS_LD256_F32(input_ptr + 2 * num); \ - MS_FLOAT32X8 src##4 = MS_LD256_F32(input_ptr + 3 * num); \ - MS_FLOAT32X8 src##5 = MS_LD256_F32(input_ptr + 4 * num); \ - MS_FLOAT32X8 src##6 = MS_LD256_F32(input_ptr + 5 * num); \ - MS_FLOAT32X8 src##7 = MS_LD256_F32(input_ptr + 6 * num); \ - MS_FLOAT32X8 src##8 = MS_LD256_F32(input_ptr + 7 * num); - -#define STORE256X8_F32(output_ptr, num, dst) \ - 
MS_ST256_F32(output_ptr + 0 * num, dst##1); \ - MS_ST256_F32(output_ptr + 1 * num, dst##2); \ - MS_ST256_F32(output_ptr + 2 * num, dst##3); \ - MS_ST256_F32(output_ptr + 3 * num, dst##4); \ - MS_ST256_F32(output_ptr + 4 * num, dst##5); \ - MS_ST256_F32(output_ptr + 5 * num, dst##6); \ - MS_ST256_F32(output_ptr + 6 * num, dst##7); \ - MS_ST256_F32(output_ptr + 7 * num, dst##8); - -#define LOAD128X8_F32(src, input_ptr, num) \ - MS_FLOAT32X4 src##1 = MS_LDQ_F32(input_ptr + 0 * num); \ - MS_FLOAT32X4 src##2 = MS_LDQ_F32(input_ptr + 1 * num); \ - MS_FLOAT32X4 src##3 = MS_LDQ_F32(input_ptr + 2 * num); \ - MS_FLOAT32X4 src##4 = MS_LDQ_F32(input_ptr + 3 * num); \ - MS_FLOAT32X4 src##5 = MS_LDQ_F32(input_ptr + 4 * num); \ - MS_FLOAT32X4 src##6 = MS_LDQ_F32(input_ptr + 5 * num); \ - MS_FLOAT32X4 src##7 = MS_LDQ_F32(input_ptr + 6 * num); \ - MS_FLOAT32X4 src##8 = MS_LDQ_F32(input_ptr + 7 * num); - -#define STORE128X8_F32(output_ptr, num, dst) \ - MS_STQ_F32(output_ptr + 0 * num, dst##1); \ - MS_STQ_F32(output_ptr + 1 * num, dst##2); \ - MS_STQ_F32(output_ptr + 2 * num, dst##3); \ - MS_STQ_F32(output_ptr + 3 * num, dst##4); \ - MS_STQ_F32(output_ptr + 4 * num, dst##5); \ - MS_STQ_F32(output_ptr + 5 * num, dst##6); \ - MS_STQ_F32(output_ptr + 6 * num, dst##7); \ - MS_STQ_F32(output_ptr + 7 * num, dst##8); - -static inline MS_FLOAT32X4 MS_TANHX4_F32(MS_FLOAT32X4 src) { - static const float data[] = {378.0f, 17325.0f, 135135.0f, 28.0f, 3150.0f, 62370.0f}; - static const MS_FLOAT32X4 neg = {-1.0f, -1.0f, -1.0f, -1.0f}; - static const MS_FLOAT32X4 pos = {1.0f, 1.0f, 1.0f, 1.0f}; - MS_FLOAT32X4 square = src * src; - MS_FLOAT32X4 a = (((square + data[0]) * square + data[1]) * square + data[2]) * src; - MS_FLOAT32X4 b = ((data[3] * square + data[4]) * square + data[5]) * square + data[2]; - return MS_MINQ_F32(MS_MAXQ_F32(a / b, neg), pos); -} - -#ifdef ENABLE_AVX -static inline MS_FLOAT32X8 MS_TANHX8_F32(MS_FLOAT32X8 src) { - static const float data[] = {378.0f, 17325.0f, 135135.0f, 
28.0f, 3150.0f, 62370.0f}; - static const MS_FLOAT32X8 neg = {-1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f}; - static const MS_FLOAT32X8 pos = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}; - MS_FLOAT32X8 square = src * src; - MS_FLOAT32X8 a = (((square + data[0]) * square + data[1]) * square + data[2]) * src; - MS_FLOAT32X8 b = ((data[3] * square + data[4]) * square + data[5]) * square + data[2]; - return MS_MIN256_F32(MS_MAX256_F32(a / b, neg), pos); -} -#endif - -static inline MS_FLOAT32X4 MS_ERFX4_F32(MS_FLOAT32X4 src) { - MS_FLOAT32X4 dst; - dst[0] = erff(src[0]); - dst[1] = erff(src[1]); - dst[2] = erff(src[2]); - dst[3] = erff(src[3]); - return dst; -} - -#ifdef ENABLE_ARM64 -static inline float16x8_t MS_TANHX8_F16(float16x8_t src) { - float32x4_t src_low = vcvt_f32_f16(vget_low_f16(src)); - float32x4_t src_high = vcvt_f32_f16(vget_high_f16(src)); - return vcombine_f16(vcvt_f16_f32(MS_TANHX4_F32(src_low)), vcvt_f16_f32(MS_TANHX4_F32(src_high))); -} - -static inline float16x8_t MS_ERFX8_F16(float16x8_t src) { - float16x8_t dst; - dst[0] = erff(src[0]); - dst[1] = erff(src[1]); - dst[2] = erff(src[2]); - dst[3] = erff(src[3]); - dst[4] = erff(src[4]); - dst[5] = erff(src[5]); - dst[6] = erff(src[6]); - dst[7] = erff(src[7]); - return dst; -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INTRINSICS_MS_SIMD_INSTRUCTIONS_H_ diff --git a/mindspore/lite/nnacl/intrinsics/sse/sse_common.h b/mindspore/lite/nnacl/intrinsics/sse/sse_common.h deleted file mode 100644 index fd48d184c8..0000000000 --- a/mindspore/lite/nnacl/intrinsics/sse/sse_common.h +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_INTRINSICS_SSE_SSE_COMMON_H_ -#define MINDSPORE_LITE_NNACL_INTRINSICS_SSE_SSE_COMMON_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -void ActBlock1(__m128 *v1, size_t relu, size_t relu6); -void ActBlock2(__m128 *v1, __m128 *v2, size_t relu, size_t relu6); -void ActBlock4(__m128 *v1, __m128 *v2, __m128 *v3, __m128 *v4, size_t relu, size_t relu6); -void ActBlock8(__m128 *v1, __m128 *v2, __m128 *v3, __m128 *v4, __m128 *v5, __m128 *v6, __m128 *v7, __m128 *v8, - size_t relu_type); - -void WriteCol1(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, - __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); -void WriteCol2(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, - __m128 *dst7, __m128 *dst8, int stride, int r); -void WriteCol2Opt(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, - __m128 *dst7, __m128 *dst8, int stride, int r); -void WriteCol3(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, - __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); -void WriteCol4(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, - __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); -void WriteCol5(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, - __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); 
-void WriteCol6(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, - __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); -void WriteCol7(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, - __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); -void WriteCol8(float **dst, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, __m128 *dst6, - __m128 *dst7, __m128 *dst8, int stride, int extra_stride, int r); - -void DoBiasBlock8(const float *bias_ptr, __m128 *dst1, __m128 *dst2, __m128 *dst3, __m128 *dst4, __m128 *dst5, - __m128 *dst6, __m128 *dst7, __m128 *dst8); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_INTRINSICS_SSE_SSE_COMMON_H_ diff --git a/mindspore/lite/nnacl/l2_norm_parameter.h b/mindspore/lite/nnacl/l2_norm_parameter.h deleted file mode 100644 index 4343ef3f5e..0000000000 --- a/mindspore/lite/nnacl/l2_norm_parameter.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_ - -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" - -typedef struct L2NormParameter { - // Primitive parameter - OpParameter op_parameter_; - float epsilon_; - int axis_[MAX_SHAPE_SIZE]; - // shape correlative - size_t axis_num_; - int data_num_; - int *shape_; - size_t shape_num_; - // other parameter - ActType act_type_; -} L2NormParameter; - -typedef struct { - QuantArg in_; - QuantArg out_; -} L2NormQuantArg; - -#endif // MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/layer_norm_parameter.h b/mindspore/lite/nnacl/layer_norm_parameter.h deleted file mode 100644 index 928662d9d7..0000000000 --- a/mindspore/lite/nnacl/layer_norm_parameter.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_LAYER_NORM_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_LAYER_NORM_PARAMETER_H_ - -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" - -enum ElementwiseMode { ELEMENTWISE_NOT = 0, ELEMENTWISE_PER_CHANNEL = 1, ELEMENTWISE_PER_NUM = 2 }; -typedef struct LayerNormParameter { - // Primitive parameter - OpParameter op_parameter_; - float epsilon_; - enum ElementwiseMode elementwise_mode_; - bool elementwise_affine_; - int begin_norm_axis_; - int begin_params_axis_; - // shape correlative - int norm_inner_size_; - int norm_outer_size_; - int params_inner_size_; - int params_outer_size_; - int normalized_dims_; - int normalized_shape_[MAX_SHAPE_SIZE]; - // other parameter - int thread_count_; - int thread_outsize_; -} LayerNormParameter; - -typedef struct LayerNormQuantArg { - int32_t in_zp_; - int32_t out_zp_; - double in_scale_; - double out_scale_; -} LayerNormQuantArg; - -#endif // MINDSPORE_LITE_NNACL_LAYER_NORM_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/lsh_projection_parameter.h b/mindspore/lite/nnacl/lsh_projection_parameter.h deleted file mode 100644 index 9275279b05..0000000000 --- a/mindspore/lite/nnacl/lsh_projection_parameter.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_LSH_PROJECTION_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_LSH_PROJECTION_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct LshProjectionParameter { - // Primitive parameter - OpParameter op_parameter_; - // shape correlative - int hash_shape_[2]; - // other parameter - int lsh_type_; - int feature_num_; - char **hash_buffs_; - size_t hash_buff_size_; - int64_t thread_stride_; -} LshProjectionParameter; - -#endif // MINDSPORE_LITE_NNACL_LSH_PROJECTION_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/lstm_parameter.h b/mindspore/lite/nnacl/lstm_parameter.h deleted file mode 100644 index b400a2437c..0000000000 --- a/mindspore/lite/nnacl/lstm_parameter.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_LSTM_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_LSTM_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct LstmParameter { - // Primitive parameter - OpParameter op_parameter_; - // shape correlative - int input_size_; - int hidden_size_; // output_size - int seq_len_; - int batch_; - // other parameter - int output_step_; - bool bidirectional_; - float zoneout_cell_; - float zoneout_hidden_; - int input_row_align_; - int input_col_align_; - int state_row_align_; - int state_col_align_; -} LstmParameter; - -#endif // MINDSPORE_LITE_NNACL_LSTM_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/matmul_parameter.h b/mindspore/lite/nnacl/matmul_parameter.h deleted file mode 100644 index 9f9c023568..0000000000 --- a/mindspore/lite/nnacl/matmul_parameter.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_MATMUL_H_ -#define MINDSPORE_LITE_NNACL_MATMUL_H_ - -#include "nnacl/op_base.h" - -typedef void (*MATMUL_OPT_R4_FUNC)(const int8_t *a, const int8_t *b, int *dst, int row_4, int col_4, int deep_16, - const int *input_sum, const int *bias); - -typedef void (*MATMUL_OPT_R_FUNC)(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, - size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, - int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, - int32_t maxi, size_t per_channel); - -typedef void (*MATMUL_OPT_DP_FUNC)(const int8_t *a, const int8_t *b, int8_t *dst, size_t row, size_t col, size_t deep_4, - size_t stride, const int32_t *input_sum, const int32_t *bias, int32_t *left_shift, - int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini, - int32_t maxi, size_t per_channel, int *filter_zp); - -typedef enum OutType { OutType_C8 = 0, OutType_Nhwc = 1, OutType_TileC8 = 2 } OutType; - -typedef struct MatMulParameter { - // Primitive parameter - OpParameter op_parameter_; - bool has_bias_; - - // other parameter - int row_; - int col_; - int row_4_; - int row_6_; - int row_12_; - int row_16_; - int row_align_; - int col_4_; - int col_8_; - int col_align_; - int deep_; - int deep_4_; - int deep_16_; - int batch; - bool a_transpose_; /* false : row-major */ - bool b_transpose_; /* true : col-major */ - bool a_const_; - bool b_const_; - ActType act_type_; - bool use_axis_; - int axis_; -} MatMulParameter; - -typedef struct MatmulQuantParameter { - QuantArg input_; - QuantArg weight_; - QuantArg output_; - int32_t out_act_min_; - int32_t out_act_max_; - float *filter_scale_; - int32_t *filter_zp_; - int32_t *left_shift_; - int32_t *right_shift_; - int32_t *quant_multiplier_; -} MatmulQuantParameter; - -#endif // MINDSPORE_LITE_NNACL_MATMUL_H_ diff --git a/mindspore/lite/nnacl/mul_parameter.h b/mindspore/lite/nnacl/mul_parameter.h deleted 
file mode 100644 index 7c2d8fe181..0000000000 --- a/mindspore/lite/nnacl/mul_parameter.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_MUL_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_MUL_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct MulQuantArg { - QuantArg in_quant_args_[2]; - QuantArg out_quant_arg_; - int output_multiplier_; - int output_activation_min_; - int output_activation_max_; - int shift_left_; - int shift_right_; -} MulQuantArg; - -typedef struct MulParameter { - // Primitive parameter - OpParameter op_parameter_; - // other parameter - int thread_count_; - MulQuantArg mul_quant_arg_; -} MulParameter; - -#endif // MINDSPORE_LITE_NNACL_MUL_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/nnacl_common.h b/mindspore/lite/nnacl/nnacl_common.h deleted file mode 100644 index 3e02fe8991..0000000000 --- a/mindspore/lite/nnacl/nnacl_common.h +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_NNACL_COMMON_H_ -#define MINDSPORE_LITE_NNACL_NNACL_COMMON_H_ - -#include "nnacl/op_base.h" - -#ifdef __cplusplus -extern "C" { -#endif - -static inline void ComputeStrides(const int *shape, int *strides, const int ndim) { - int stride = 1; - for (int i = ndim - 1; i >= 0; i--) { - strides[i] = stride; - stride *= shape[i]; - } -} - -static inline void ComputeAxisDims(const int *shape, int shape_size, int axis, int *out_count, int *axis_count, - int *in_count) { - *out_count = 1; - *in_count = 1; - for (int i = 0; i < shape_size; i++) { - if (i < axis) *out_count = (*out_count) * shape[i]; - if (i == axis) *axis_count = shape[axis]; - if (i > axis) *in_count = (*in_count) * shape[i]; - } -} - -static const unsigned int FP32_BIT_SIZE = 32; -static const unsigned int FP32_EXPONENT_BIAS = 127; -static const unsigned int FP32_SIGNIFICAND = 23; -static const unsigned int FP32_EXPONENT_MAX = 255; -static const unsigned int FP16_BIT_SIZE = 16; -static const unsigned int FP16_EXPONENT_BIAS = 15; -static const unsigned int FP16_SIGNIFICAND = 10; -static const int FP16_EXPONENT_MAX = 30; -static const int FP16_EXPONENT_MIN = -10; -float ShortToFloat32(uint16_t src_value); -uint16_t Float32ToShort(float src_value); - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_NNACL_COMMON_H_ diff --git a/mindspore/lite/nnacl/nnacl_utils.h b/mindspore/lite/nnacl/nnacl_utils.h deleted file mode 100644 index 735425d3bb..0000000000 --- a/mindspore/lite/nnacl/nnacl_utils.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 
2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_NNACL_UTILS_H_ -#define MINDSPORE_LITE_NNACL_NNACL_UTILS_H_ - -#include -#ifdef __cplusplus -extern "C" { -#endif - -#if defined(__arm__) || defined(__aarch64__) -uint32_t getHwCap(int hwcap_type); -#endif - -#ifdef DEBUG -#include -#define NNACL_ASSERT(f) assert(f) -#else -#define NNACL_ASSERT(f) ((void)0) -#endif - -#ifdef __cplusplus -} -#endif -#endif // MINDSPORE_LITE_NNACL_NNACL_UTILS_H_ diff --git a/mindspore/lite/nnacl/non_max_suppression_parameter.h b/mindspore/lite/nnacl/non_max_suppression_parameter.h deleted file mode 100644 index 409dc9b342..0000000000 --- a/mindspore/lite/nnacl/non_max_suppression_parameter.h +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct NMSParameter { - // Primitive parameter - OpParameter op_parameter_; - int center_point_box_; -} NMSParameter; - -#endif // MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/op_base.h b/mindspore/lite/nnacl/op_base.h deleted file mode 100644 index 476fdb8534..0000000000 --- a/mindspore/lite/nnacl/op_base.h +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_OP_BASE_H_ -#define MINDSPORE_LITE_NNACL_OP_BASE_H_ - -#include -#include -#include -#include -#if defined(ENABLE_AVX) || defined(ENABLE_SSE) || defined(ENABLE_ARM) -#include "nnacl/intrinsics/ms_simd_instructions.h" -#endif - -#define C2NUM 2 -#define C4NUM 4 -#define C6NUM 6 -#define C8NUM 8 -#define C12NUM 12 -#define C16NUM 16 -#define TILE_NUM 8 - -#define MSMIN(x, y) ((x) < (y) ? (x) : (y)) -#define MSMAX(x, y) ((x) > (y) ? (x) : (y)) - -#define UP_DIV(x, y) (((x) + (y) - (1)) / (y)) -#define UP_ROUND(x, y) (((x) + (y) - (1)) / (y) * (y)) -#define UP_ROUND_DIV(x, y) (x % y == 0 ? 
(x / y) : (x / y) + 1) -#define DOWN_DIV(x, y) (((x) - (y) + (1)) / (y)) - -#define MSVALID(left, x, right) (MSMIN((MSMAX(left, x)), right)) - -#define COMM_SHAPE_SIZE 4 -#define MAX_SHAPE_SIZE 8 - -#define DIMENSION_4D 4 -#define DIMENSION_6D 6 -#define DIMENSION_7D 7 -#define kInputIndex 0 -#define kWeightIndex 1 -#define kBiasIndex 2 -#define kOutputIndex 0 -#define kNHWC_N 0 -#define kNHWC_H 1 -#define kNHWC_W 2 -#define kNHWC_C 3 -#define kInputSize1 2 -#define kInputSize2 3 -#define MAX_AXIS_SIZE 6 -#define MAX_LEN 256 -#define FLT16_MAX 65504 - -typedef enum LiteDataType { - kDataTypeFloat, - kDataTypeFloat16, - kDataTypeInt, - kDataTypeInt8, - KDataTypeBool, -} LiteDataType; - -typedef enum DataOrder { - RowMajor, - ColMajor, -} DataOrder; - -typedef struct OpParameter { - char name_[100]; - bool infer_flag_; - int type_; - int thread_num_; - int quant_type_; -} OpParameter; - -typedef struct QuantArg { - float scale_; - int32_t zp_; -} QuantArg; - -typedef struct QuantMulArg { - int32_t multiplier_; - int left_shift_; - int right_shift_; -} QuantMulArg; - -typedef enum ActType { ActType_No, ActType_Relu, ActType_Sigmod, ActType_Relu6, ActType_Prelu } ActType; -typedef enum PadMode { Pad_pad, Pad_same, Pad_valid } PadMode; -typedef enum RoundingMode { Rounding_No, Rounding_Away_from_zero, Rounding_Up } RoundingMode; -typedef enum CalFixedMultiplierMode { - Method_No, - Method_SinglePrecision, - Method_DoublePrecision -} CalFixedMultiplierMode; - -#endif // MINDSPORE_LITE_NNACL_OP_BASE_H_ diff --git a/mindspore/lite/nnacl/pack.h b/mindspore/lite/nnacl/pack.h deleted file mode 100644 index f51ec8410a..0000000000 --- a/mindspore/lite/nnacl/pack.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_PACK_H_ -#define MINDSPORE_LITE_NNACL_PACK_H_ - -#include "nnacl/fp32/pack_fp32.h" -#include "nnacl/int8/pack_int8.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_NNACL_PACK_H_ diff --git a/mindspore/lite/nnacl/pad_parameter.h b/mindspore/lite/nnacl/pad_parameter.h deleted file mode 100644 index fb9c7ae136..0000000000 --- a/mindspore/lite/nnacl/pad_parameter.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_PAD_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_PAD_PARAMETER_H_ - -#include "nnacl/op_base.h" - -#define MAX_PAD_SIZE 8 -#define DEFAULT_PAD_NDIMS 4 - -typedef struct PadQuantArg { - QuantArg *in_quant_args_; - QuantArg *out_quanr_args_; - int8_t *constant_value_; -} PadQuantArg; - -typedef struct PadParameter { - // Primitive parameter - OpParameter op_parameter_; - int paddings_[MAX_SHAPE_SIZE]; - int pad_mode_; - float constant_value_; - // shape correlative - int padding_length; - // other parameter - int in_strides[COMM_SHAPE_SIZE]; - int out_strides[DEFAULT_PAD_NDIMS]; - int mirror_offset_; - PadQuantArg pad_quant_arg_; -} PadParameter; - -typedef struct MirrorPadBlock { - int out_offset_; - int out_stride_[DEFAULT_PAD_NDIMS]; - int size_[DEFAULT_PAD_NDIMS]; -} MirrorPadBlock; - -#endif // MINDSPORE_LITE_NNACL_PAD_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/pooling_parameter.h b/mindspore/lite/nnacl/pooling_parameter.h deleted file mode 100644 index 84f7e1b068..0000000000 --- a/mindspore/lite/nnacl/pooling_parameter.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_POOLING_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_POOLING_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef enum PoolMode { PoolMode_No, PoolMode_MaxPool, PoolMode_AvgPool } PoolMode; - -typedef enum RoundMode { RoundMode_No, RoundMode_Ceil, RoundMode_Floor } RoundMode; - -typedef struct PoolingParameter { - // Primitive parameter - OpParameter op_parameter_; - PoolMode pool_mode_; - RoundMode round_mode_; - PadMode pad_mode_; - ActType act_type_; - int avg_mode_; - bool global_; - int window_w_; - int window_h_; - int stride_w_; - int stride_h_; - // shape correlative - int input_w_; - int input_h_; - int input_batch_; - int input_channel_; - int output_w_; - int output_h_; - int output_batch_; - int output_channel_; - int pad_u_; - int pad_d_; - int pad_l_; - int pad_r_; - // other parameter - int thread_num_; - QuantArg **quant_args_; - bool quantize_; -} PoolingParameter; - -#endif // MINDSPORE_LITE_NNACL_POOLING_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/power_parameter.h b/mindspore/lite/nnacl/power_parameter.h deleted file mode 100644 index 34f46a73e8..0000000000 --- a/mindspore/lite/nnacl/power_parameter.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_POWER_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_POWER_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct PowerQuantArg { - QuantArg in_args_; - QuantArg exp_args_; - QuantArg out_args_; - int output_activation_min_; - int output_activation_max_; -} PowerQuantArg; - -typedef struct PowerParameter { - // Primitive parameter - OpParameter op_parameter_; - float power_; - float scale_; - float shift_; - // other parameter - PowerQuantArg quant_arg_; - bool broadcast_; -} PowerParameter; - -#endif // MINDSPORE_LITE_NNACL_POWER_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/predict_parameter.h b/mindspore/lite/nnacl/predict_parameter.h deleted file mode 100644 index a4e0fc857b..0000000000 --- a/mindspore/lite/nnacl/predict_parameter.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_PREDICT_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_PREDICT_PARAMETER_H_ - -#include "nnacl/op_base.h" -typedef struct { - // Primitive parameter - OpParameter op_parameter_; - // other parameter - int output_num; - float weight_threshold; -} PredictParameter; - -typedef struct { - int label; - float weight; -} LabelInfo; -#endif // MINDSPORE_LITE_NNACL_PREDICT_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/prelu_parameter.h b/mindspore/lite/nnacl/prelu_parameter.h deleted file mode 100644 index 5e1087f7f7..0000000000 --- a/mindspore/lite/nnacl/prelu_parameter.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_PRELU_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_PRELU_PARAMETER_H_ - -#include "nnacl/op_base.h" -typedef struct PReluParameter { - // Primitive parameter - OpParameter op_parameter_; - // other parameter - float *slope_; - bool channelShared; - int tile_block_; - int channel_num_; - int input_num_; -} PReluParameter; - -#endif // MINDSPORE_LITE_NNACL_PRELU_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/prior_box_parameter.h b/mindspore/lite/nnacl/prior_box_parameter.h deleted file mode 100644 index f258299b7e..0000000000 --- a/mindspore/lite/nnacl/prior_box_parameter.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_PRIOR_BOX_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_PRIOR_BOX_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct PriorBoxParameter { - // Primitive parameter - OpParameter op_parameter_; - int32_t min_sizes_size; - int32_t min_sizes[MAX_SHAPE_SIZE]; - int32_t max_sizes_size; - int32_t max_sizes[MAX_SHAPE_SIZE]; - int32_t aspect_ratios_size; - float aspect_ratios[MAX_SHAPE_SIZE]; - float variances[COMM_SHAPE_SIZE]; - int32_t image_size_w; - int32_t image_size_h; - float step_w; - float step_h; - bool clip; - bool flip; - float offset; -} PriorBoxParameter; - -#endif // MINDSPORE_LITE_NNACL_PRIOR_BOX_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/random_parameter.h b/mindspore/lite/nnacl/random_parameter.h deleted file mode 100644 index bf3473c16c..0000000000 --- a/mindspore/lite/nnacl/random_parameter.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_RNADOM_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_RNADOM_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct RandomParam { - OpParameter op_parameter_; - int seed_; - int seed2_; -} RandomParam; - -#endif // MINDSPORE_LITE_NNACL_RNADOM_STANDARD_NORMAL_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/reduce_parameter.h b/mindspore/lite/nnacl/reduce_parameter.h deleted file mode 100644 index 1e7ace4156..0000000000 --- a/mindspore/lite/nnacl/reduce_parameter.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_REDUCE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_REDUCE_PARAMETER_H_ -#include "nnacl/op_base.h" - -typedef struct ReduceParameter { - // primitive parameter - OpParameter op_parameter_; - int axes_[MAX_SHAPE_SIZE]; - bool keep_dims_; - int mode_; - bool reduce_to_end_; - float coeff; - - // other parameter - int num_axes_; -} ReduceParameter; - -#endif // MINDSPORE_LITE_NNACL_REDUCE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/reshape_parameter.h b/mindspore/lite/nnacl/reshape_parameter.h deleted file mode 100644 index 1e64e8a3f6..0000000000 --- a/mindspore/lite/nnacl/reshape_parameter.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_RESHAHPE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_RESHAHPE_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct ReshapeQuantArg { - QuantArg in_args_; - QuantArg out_args_; - int output_activation_min_; - int output_activation_max_; -} ReshapeQuantArg; - -typedef struct ReshapeParameter { - // primitive parameter - OpParameter op_parameter_; - int shape_dim_; - int shape_[MAX_SHAPE_SIZE]; - - // other parameter - ReshapeQuantArg quant_para_; - int thread_count_; -} ReshapeParameter; - -#endif // MINDSPORE_LITE_NNACL_RESHAHPE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/resize_parameter.h b/mindspore/lite/nnacl/resize_parameter.h deleted file mode 100644 index 5f7c486482..0000000000 --- a/mindspore/lite/nnacl/resize_parameter.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_RESIZE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_RESIZE_PARAMETER_H_ - -#include "nnacl/op_base.h" -typedef struct ResizeParameter { - // primitive parameter - OpParameter op_parameter_; - int method_; - int64_t new_height_; - int64_t new_width_; - int coordinate_transform_mode_; - float cubic_coeff_; - bool preserve_aspect_ratio_; -} ResizeParameter; - -typedef struct CropAndResizeParameter { - // primitive parameter - OpParameter op_parameter_; - int method_; - float extrapolation_value_; -} CropAndResizeParameter; -#endif // MINDSPORE_LITE_NNACL_RESIZE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/reverse_sequence_parameter.h b/mindspore/lite/nnacl/reverse_sequence_parameter.h deleted file mode 100644 index dd5ce36462..0000000000 --- a/mindspore/lite/nnacl/reverse_sequence_parameter.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_REVERSE_SEQUENCE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_REVERSE_SEQUENCE_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct ReverseSequenceParameter { - // primitive parameter - OpParameter op_parameter_; - int seq_axis_; - int batch_axis_; - - // shape correlative - int input_shape0_[5]; - int output_shape_[5]; - int input_stride_[5]; - int output_stride_[5]; - - // other parameter - int ndim_; - int outer_count_; - int outer_stride_; - int inner_count_; - int inner_stride_; - int copy_byte_size_; - int total_data_size_; - bool is_seq_length_int32_; -} ReverseSequenceParameter; - -#endif // MINDSPORE_LITE_NNACL_REVERSE_SEQUENCE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/scale.h b/mindspore/lite/nnacl/scale.h deleted file mode 100644 index dbca958234..0000000000 --- a/mindspore/lite/nnacl/scale.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SCALE_H_ -#define MINDSPORE_LITE_NNACL_SCALE_H_ - -#include "nnacl/op_base.h" - -typedef struct ScaleParameter { - // primitive parameter - OpParameter op_parameter_; - int axis_; - int activation_type_; - - // shape correlative - int outer_size_; - int axis_size_; - int inner_size_; - - // other parameter - bool const_scale_; - bool const_offset_; - QuantMulArg scale_mul_arg_; - QuantMulArg offset_mul_arg_; - int input_zp_; - int scale_zp_; - int offset_zp_; - int output_zp_; - int output_activation_min_; - int output_activation_max_; -} ScaleParameter; - -#endif // MINDSPORE_LITE_NNACL_SCALE_H_ diff --git a/mindspore/lite/nnacl/sigmoid_parameter.h b/mindspore/lite/nnacl/sigmoid_parameter.h deleted file mode 100644 index f5cade2fb8..0000000000 --- a/mindspore/lite/nnacl/sigmoid_parameter.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SIGMOID_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_SIGMOID_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct SigmoidParameter { - // primitive parameter - OpParameter op_parameter_; - - // shape correlative - const int *in_shape_; - const int *out_shape_; - - // other parameter - SigmoidQuantArg quant_arg; - double alpha_; - int thread_count_; - int64_t offset_[MAX_SHAPE_SIZE]; - int64_t in_offset_[MAX_SHAPE_SIZE]; - int64_t axis_; - int input_dim_; - int element_num; -} SigmoidParameter; - -#endif // MINDSPORE_LITE_NNACL_SIGMOID_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/skip_gram_parameter.h b/mindspore/lite/nnacl/skip_gram_parameter.h deleted file mode 100644 index 26d3c28cbf..0000000000 --- a/mindspore/lite/nnacl/skip_gram_parameter.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SKIP_GRAM_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_SKIP_GRAM_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct SkipGramParameter { - // primitive parameter - OpParameter op_parameter_; - bool include_all_ngrams; - int max_skip_size; - int ngram_size; -} SkipGramParameter; - -#endif // MINDSPORE_LITE_NNACL_SKIP_GRAM_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/slice_parameter.h b/mindspore/lite/nnacl/slice_parameter.h deleted file mode 100644 index d3627965e5..0000000000 --- a/mindspore/lite/nnacl/slice_parameter.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SLICE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_SLICE_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct SliceQuantArg { - QuantArg in_args_; - QuantArg out_args_; - int output_activation_min_; - int output_activation_max_; -} SliceQuantArg; - -typedef struct SliceParameter { - // primitive parameter - OpParameter op_parameter_; - - // shape correlative - int32_t shape_[COMM_SHAPE_SIZE]; - int32_t begin_[COMM_SHAPE_SIZE]; - int32_t end_[COMM_SHAPE_SIZE]; - int32_t size_[COMM_SHAPE_SIZE]; - int32_t axis_[COMM_SHAPE_SIZE]; - - // other parameter - SliceQuantArg quant_arg_; - int32_t param_length_; -} SliceParameter; - -#endif // MINDSPORE_LITE_NNACL_SLICE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/softmax_parameter.h b/mindspore/lite/nnacl/softmax_parameter.h deleted file mode 100644 index 902ffb7485..0000000000 --- a/mindspore/lite/nnacl/softmax_parameter.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SOFTMAX_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_SOFTMAX_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct SoftmaxParameter { - // primitive parameter - OpParameter op_parameter_; - int32_t axis_; - - // shape correlative - int input_shape_[5]; - - // other parameter - int element_size_; - int n_dim_; -} SoftmaxParameter; - -#endif // MINDSPORE_LITE_NNACL_SOFTMAX_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/sparse_to_dense_parameter.h b/mindspore/lite/nnacl/sparse_to_dense_parameter.h deleted file mode 100644 index 41c0e22087..0000000000 --- a/mindspore/lite/nnacl/sparse_to_dense_parameter.h +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SPARSE_TO_DENSE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_SPARSE_TO_DENSE_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct SparseToDenseParameter { - // primitive parameter - OpParameter op_parameter_; - bool validate_indices_; - - // other parameter - int thread_num_; -} SparseToDenseParameter; - -#endif // MINDSPORE_LITE_NNACL_SPARSE_TO_DENSE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/splice_parameter.h b/mindspore/lite/nnacl/splice_parameter.h deleted file mode 100644 index 8063960af7..0000000000 --- a/mindspore/lite/nnacl/splice_parameter.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SPLICE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_SPLICE_PARAMETER_H_ -#include "nnacl/op_base.h" -typedef struct SpliceParameter { - OpParameter op_parameter_; - int context_dim_; - int forward_indexes_dim_; - int src_to_dst_row_offset_; - int *context_; - int *forward_indexes_; - int output_dim_; -} SpliceParameter; -#endif // MINDSPORE_LITE_NNACL_SPLICE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/split_parameter.h b/mindspore/lite/nnacl/split_parameter.h deleted file mode 100644 index 7eeb4a6212..0000000000 --- a/mindspore/lite/nnacl/split_parameter.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SPLIT_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_SPLIT_PARAMETER_H_ - -#include "nnacl/op_base.h" - -#define SPLIT_STRIDES_SIZE 32 - -typedef struct SplitQuantArg { - QuantArg in_args_; - QuantArg out_args_[20]; - int output_activation_min_; - int output_activation_max_; -} SplitQuantArg; - -typedef struct SplitParameter { - // primitive parameter - OpParameter op_parameter_; - int num_split_; - int *split_sizes_; - int split_dim_; - - // shape correlative - int strides_[SPLIT_STRIDES_SIZE]; - - // other parameter - SplitQuantArg quant_arg_; - int n_dims_; - int split_count_; -} SplitParameter; - -#endif // MINDSPORE_LITE_NNACL_SPLIT_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/squeeze_parameter.h b/mindspore/lite/nnacl/squeeze_parameter.h deleted file mode 100644 index 77a419aa12..0000000000 --- a/mindspore/lite/nnacl/squeeze_parameter.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_ -#include "nnacl/op_base.h" -#include "nnacl/int8/quantize.h" - -#define SQUEEZE_OFFSET_MAX_SIZE 4 - -typedef struct SqueezeQuantArg { - QuantArg *in_quant_args_; - QuantArg *out_quant_args_; -} SqueezeQuantArg; - -typedef struct SqueezeParameter { - // primitive parameter - OpParameter op_parameter_; - int axis_[8]; - size_t axis_size_; - - // shape correlative - const int *in_shape_; - const int *out_shape_; - int offset_size_; - int64_t offset_[SQUEEZE_OFFSET_MAX_SIZE]; - int64_t in_offset_[SQUEEZE_OFFSET_MAX_SIZE]; - int input_dim_; - // other parameter - SqueezeQuantArg quant_arg; -} SqueezeParameter; - -#endif // MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/stack_parameter.h b/mindspore/lite/nnacl/stack_parameter.h deleted file mode 100644 index 1f714aad5a..0000000000 --- a/mindspore/lite/nnacl/stack_parameter.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_STACK_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_STACK_PARAMETER_H_ - -#include "nnacl/op_base.h" -typedef struct StackParameter { - // primitive parameter - OpParameter op_parameter_; - int32_t axis_; -} StackParameter; - -#endif // MINDSPORE_LITE_NNACL_STACK_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/strided_slice_parameter.h b/mindspore/lite/nnacl/strided_slice_parameter.h deleted file mode 100644 index 91fa2d6571..0000000000 --- a/mindspore/lite/nnacl/strided_slice_parameter.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_STRIDED_SLICE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_STRIDED_SLICE_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct StridedSliceParameter { - // primitive parameter - OpParameter op_parameter_; - int begins_[MAX_SHAPE_SIZE]; - int ends_[MAX_SHAPE_SIZE]; - int strides_[MAX_SHAPE_SIZE]; - int isScale; - - // shape correlative - int in_shape_length_; - int in_shape_[MAX_SHAPE_SIZE]; - - // other parameter - int num_axes_; - LiteDataType data_type; - int begins_mask_; - int ends_mask_; - int ellipsisMask_; - int newAxisMask_; - int shrinkAxisMask_; -} StridedSliceParameter; - -#endif // MINDSPORE_LITE_NNACL_STRIDED_SLICE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/tensor_c.h b/mindspore/lite/nnacl/tensor_c.h deleted file mode 100644 index 482bd361f6..0000000000 --- a/mindspore/lite/nnacl/tensor_c.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_NNACL_TENSOR_C_H_ -#define MINDSPORE_LITE_NNACL_TENSOR_C_H_ -#include "nnacl/op_base.h" - -typedef struct TensorC { - bool is_ready_; - int data_type_; - int format_; - void *data_; - size_t shape_size_; - int shape_[MAX_SHAPE_SIZE]; -} TensorC; - -#endif // MINDSPORE_LITE_NNACL_TENSOR_C_H_ diff --git a/mindspore/lite/nnacl/tensorlist_parameter.h b/mindspore/lite/nnacl/tensorlist_parameter.h deleted file mode 100644 index 0cf8156913..0000000000 --- a/mindspore/lite/nnacl/tensorlist_parameter.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_TENSORLIST_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct TensorListParameter { - // primitive parameter - OpParameter op_parameter_; - int shape_type_; - int element_dtype_; - - // other parameter - int num_element_; -} TensorListParameter; - -#endif // MINDSPORE_LITE_NNACL_ARG_TENSORLIST_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/transpose.h b/mindspore/lite/nnacl/transpose.h deleted file mode 100644 index b69ddabfb1..0000000000 --- a/mindspore/lite/nnacl/transpose.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_NNACL_TRANSPOSE_H_ -#define MINDSPORE_LITE_NNACL_TRANSPOSE_H_ - -#include "nnacl/op_base.h" - -#define MAX_TRANSPOSE_DIM_SIZE 6 - -typedef struct TransposeParameter { - // primitive parameter - OpParameter op_parameter_; - int perm_[MAX_SHAPE_SIZE]; - size_t perm_size_; - bool conjugate_; - - // shape correlative - int strides_[MAX_SHAPE_SIZE]; - int out_strides_[MAX_SHAPE_SIZE]; - - // other parameter - int num_axes_; - int data_size_; -} TransposeParameter; - -#endif // MINDSPORE_LITE_NNACL_TRANSPOSE_H_ diff --git a/mindspore/lite/nnacl/unsqueeze_parameter.h b/mindspore/lite/nnacl/unsqueeze_parameter.h deleted file mode 100644 index e543d27209..0000000000 --- a/mindspore/lite/nnacl/unsqueeze_parameter.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_UNSQUEEZE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_UNSQUEEZE_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct UnSqueezeQuantArg { - int *output_shape_; - float alpha; - int axis_; - size_t input_num_; - QuantArg in_quant_args_; - QuantArg out_quant_args_; -} UnSqueezeQuantArg; - -typedef struct UnSqueezeParameter { - // primitive parameter - OpParameter op_parameter_; - int dims_[COMM_SHAPE_SIZE]; - int num_dim_; - - // shape correlative - const int *in_shape_; - const int *out_shape_; - int64_t offset_[COMM_SHAPE_SIZE]; - int64_t axis_; - - // other parameter - UnSqueezeQuantArg quant_arg; - int thread_count_; -} UnSqueezeParameter; - -#endif // MINDSPORE_LITE_NNACL_UNSQUEEZE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/unstack_parameter.h b/mindspore/lite/nnacl/unstack_parameter.h deleted file mode 100644 index 09471839f6..0000000000 --- a/mindspore/lite/nnacl/unstack_parameter.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_UNSTACK_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_UNSTACK_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct UnstackParameter { - // primitive parameter - OpParameter op_parameter_; - int num_; - int axis_; - - // other parameter - int pre_dims_; - int axis_dim_; - int after_dims_; -} UnstackParameter; - -#endif // MINDSPORE_LITE_NNACL_UNSTACK_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/upsample_parameter.h b/mindspore/lite/nnacl/upsample_parameter.h deleted file mode 100644 index ab50e51c83..0000000000 --- a/mindspore/lite/nnacl/upsample_parameter.h +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_NNACL_UPSAMPLE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_UPSAMPLE_PARAMETER_H_ - -#include "nnacl/op_base.h" -typedef struct { - // primitive parameter - OpParameter op_parameter_; - - // other parameter - int method_; // 0 for bilinear; 1 for nearest -} UpsampleParameter; - -#endif // MINDSPORE_LITE_NNACL_UPSAMPLE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/where_parameter.h b/mindspore/lite/nnacl/where_parameter.h deleted file mode 100644 index 9480d6e2fe..0000000000 --- a/mindspore/lite/nnacl/where_parameter.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_NNACL_WHERE_PARAMETER_H_ -#define MINDSPORE_LITE_NNACL_WHERE_PARAMETER_H_ - -#include "nnacl/op_base.h" - -typedef struct WhereParameter { - // primitive parameter - OpParameter op_parameter_; - - // other parameter - int condition_num_; - int x_num_; - int y_num_; - int max_num_; - - int rank_; - int thread_num_; -} WhereParameter; - -#endif // MINDSPORE_LITE_NNACL_WHERE_PARAMETER_H_ diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index f47438f839..b4c68eae56 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -2,9 +2,7 @@ add_compile_definitions(USE_ANDROID_LOG) if(ENABLE_V0) add_definitions(-DENABLE_V0) endif() -set(LITE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/..) 
-include_directories(${LITE_DIR}/nnacl/) -include_directories(${LITE_DIR}/nnacl/optimize) +include_directories(${CCSRC_DIR}/backend/kernel_compiler/cpu) if(PLATFORM_ARM32 OR PLATFORM_ARM64) #for performance diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/power_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/power_fp16.h index 9fb53f363b..b59d18edd9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/power_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/power_fp16.h @@ -20,7 +20,7 @@ #include #include "src/lite_kernel.h" #include "include/context.h" -#include "mindspore/lite/nnacl/fp16/power_fp16.h" +#include "nnacl/fp16/power_fp16.h" namespace mindspore::kernel { class PowerFp16CPUKernel : public LiteKernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc index e810386542..3eed8ce64e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc @@ -18,7 +18,7 @@ #include #include "src/runtime/kernel/arm/fp32/l2_norm_fp32.h" #include "include/errorcode.h" -#include "mindspore/lite/nnacl/fp32/l2_norm_fp32.h" +#include "nnacl/fp32/l2_norm_fp32.h" #include "src/runtime/runtime_api.h" using mindspore::kernel::KERNEL_ARCH::kCPU; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h index 905c833840..7b75f3b16a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h @@ -19,7 +19,7 @@ #include #include #include "src/lite_kernel.h" -#include "mindspore/lite/nnacl/non_max_suppression_parameter.h" +#include "nnacl/non_max_suppression_parameter.h" using mindspore::lite::RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.h 
b/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.h index 127582dda9..fdd754c7cf 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.h @@ -20,7 +20,7 @@ #include #include "src/lite_kernel.h" #include "include/context.h" -#include "mindspore/lite/nnacl/fp32/power_fp32.h" +#include "nnacl/fp32/power_fp32.h" namespace mindspore::kernel { class PowerCPUKernel : public LiteKernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.h index c91ab68582..f0a1eb5574 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.h @@ -19,7 +19,7 @@ #include #include "src/lite_kernel.h" -#include "mindspore/lite/nnacl/fp32/scatter_nd_fp32.h" +#include "nnacl/fp32/scatter_nd_fp32.h" namespace mindspore::kernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram_fp32.h index 2ba154862b..22b5a85450 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram_fp32.h @@ -19,7 +19,7 @@ #include #include "src/lite_kernel.h" -#include "mindspore/lite/nnacl/skip_gram_parameter.h" +#include "nnacl/skip_gram_parameter.h" #include "src/common/string_util.h" namespace mindspore::kernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.cc index 24d9d14e60..57f317b2f9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.cc @@ -14,12 +14,10 @@ * limitations under the License. 
*/ #include "src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.h" - #include #include - #include "include/errorcode.h" -#include "mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.h" +#include "nnacl/fp32/sparse_to_dense_fp32.h" #include "schema/model_generated.h" #include "schema/ops_generated.h" #include "src/kernel_registry.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.h index 3b6e44a1f6..d3a2e35603 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.h @@ -20,7 +20,7 @@ #include "src/lite_kernel.h" #include "include/context.h" -#include "mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.h" +#include "nnacl/fp32/sparse_to_dense_fp32.h" #include "src/runtime/kernel/arm/base/layout_transform.h" using mindspore::lite::InnerContext; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc index 0f940a3b70..8c23b32b46 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.cc @@ -17,7 +17,7 @@ #include #include #include "schema/model_generated.h" -#include "mindspore/lite/nnacl/fp32/where_fp32.h" +#include "nnacl/fp32/where_fp32.h" #include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.h index d4ed588c76..34545cdcc2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.h @@ -20,7 +20,7 @@ #include "src/lite_kernel.h" #include "include/context.h" -#include "mindspore/lite/nnacl/fp32/where_fp32.h" +#include "nnacl/fp32/where_fp32.h" #include "src/runtime/kernel/arm/base/layout_transform.h" using 
mindspore::lite::InnerContext; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.cc index abee296f3d..960090b760 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.cc @@ -16,7 +16,7 @@ #include "src/runtime/kernel/arm/fp32/zeroslike_fp32.h" #include "schema/model_generated.h" -#include "mindspore/lite/nnacl/base/zeroslike_base.h" +#include "nnacl/base/zeroslike_base.h" #include "src/kernel_registry.h" #include "include/errorcode.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h index 5031240b60..5b5fbc6eb5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h @@ -17,7 +17,7 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_ARGMINMAX_INT8_H_ #include -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "nnacl/int8/arg_min_max_int8.h" #include "nnacl/common_func.h" #include "include/errorcode.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h index 06512bbf82..b986c6f057 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h @@ -20,7 +20,7 @@ #include "include/errorcode.h" #include "nnacl/base/depth_to_space_base.h" #include "nnacl/int8/depth_to_space_int8.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "src/runtime/kernel/arm/base/depth_to_space_base.h" namespace mindspore::kernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h index d54c16fa9f..485e6a945c 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_INT8_GATHERND_INT8_H_ #include -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "src/lite_kernel.h" namespace mindspore::kernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc index 844425633a..5d38262939 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc @@ -17,7 +17,7 @@ #include #include "nnacl/gather_parameter.h" #include "nnacl/int8/gather_int8.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "schema/model_generated.h" #include "src/kernel_registry.h" #include "src/runtime/runtime_api.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h index 57f5096942..775d0dbbb8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h @@ -19,7 +19,7 @@ #include #include "nnacl/gather_parameter.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "src/lite_kernel.h" namespace mindspore::kernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h index 655240a8e2..1d7011f11a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h @@ -20,7 +20,7 @@ #include #include "src/lite_kernel.h" #include "nnacl/int8/hswish_int8.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" namespace mindspore::kernel { class HswishInt8CPUKernel : public LiteKernel { diff --git 
a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h index 2f4d5ac81d..46dc8aa15f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h @@ -20,7 +20,7 @@ #include #include "include/context.h" #include "nnacl/matmul_parameter.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "src/lite_kernel.h" #include "src/runtime/kernel/arm/int8/matmul_base_int8.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h index 9bb2e7ce2a..af1aba9803 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h @@ -19,7 +19,7 @@ #include #include "src/lite_kernel.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "nnacl/power_parameter.h" namespace mindspore::kernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc index 86ca7c7c66..aef4335582 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc @@ -18,7 +18,7 @@ #include "schema/model_generated.h" #include "src/kernel_registry.h" #include "src/runtime/runtime_api.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "nnacl/pack.h" #include "include/errorcode.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h index bdeaf6da18..2f3d986bd7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h @@ -21,7 +21,7 @@ #include "src/lite_kernel.h" #include "nnacl/reduce_parameter.h" #include "nnacl/int8/reduce_int8.h" 
-#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "src/runtime/kernel/arm/base/reduce_base.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h index 7a951e3ca8..0f92bf82e0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h @@ -19,7 +19,7 @@ #include #include "src/lite_kernel.h" #include "src/runtime/kernel/arm/base/resize_base.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" using mindspore::schema::PrimitiveType_Resize; using mindspore::schema::ResizeMethod; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc index 7289577948..0a7c8fdcf2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.cc @@ -18,7 +18,7 @@ #include #include #include "nnacl/int8/sigmoid_int8.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "schema/model_generated.h" #include "src/kernel_registry.h" #include "src/runtime/runtime_api.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h index f956f7a623..ace2a64320 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h @@ -19,7 +19,7 @@ #include #include "src/runtime/kernel/arm/base/slice_base.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" namespace mindspore::kernel { class SliceInt8CPUKernel : public SliceCPUKernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h index 62d83bf2f1..aec062c4eb 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h @@ -19,7 +19,7 @@ #include #include "src/runtime/kernel/arm/base/softmax_base.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" namespace mindspore::kernel { class SoftmaxInt8CPUKernel : public SoftmaxBaseCPUKernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.h index 0be5e503d9..f1ef2ffefe 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.h @@ -22,7 +22,7 @@ #include #include "src/lite_kernel.h" #include "nnacl/int8/tanh_int8.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "include/errorcode.h" namespace mindspore::kernel { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.h index aabe94beb7..390327ebee 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/fill.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_FILL_H_ #include -#include "mindspore/lite/nnacl/base/fill_base.h" +#include "nnacl/base/fill_base.h" #include "src/runtime/kernel/opencl/opencl_kernel.h" namespace mindspore::kernel { diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/power.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/power.h index 6efce285d1..9e821af4b0 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/power.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/power.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_POWER_H_ #include -#include "mindspore/lite/nnacl/fp32/power_fp32.h" +#include "nnacl/fp32/power_fp32.h" #include "src/runtime/kernel/opencl/opencl_kernel.h" namespace mindspore::kernel { diff --git 
a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc index e34f05d8d0..c9aabf710d 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc @@ -17,7 +17,7 @@ */ #include "src/runtime/kernel/opencl/kernel/prelu.h" -#include +#include #include #include #include "src/runtime/kernel/opencl/cl/prelu.cl.inc" diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.h index b29c1efb8b..e563478e65 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/sparse_to_dense.h @@ -19,7 +19,7 @@ #include #include "src/runtime/kernel/opencl/opencl_kernel.h" -#include "mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.h" +#include "nnacl/fp32/sparse_to_dense_fp32.h" namespace mindspore::kernel { diff --git a/mindspore/lite/test/CMakeLists.txt b/mindspore/lite/test/CMakeLists.txt index 6e8261515b..f466970e3e 100644 --- a/mindspore/lite/test/CMakeLists.txt +++ b/mindspore/lite/test/CMakeLists.txt @@ -2,9 +2,11 @@ set(TOP_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../..) 
set(TEST_DIR ${TOP_DIR}/mindspore/lite/test) set(LITE_DIR ${TOP_DIR}/mindspore/lite) set(CCSRC_DIR ${TOP_DIR}/mindspore/ccsrc) +set(NNACL_DIR ${TOP_DIR}/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl) set(CONVERTER_DIR ${TOP_DIR}/mindspore/lite/tools/converter) include_directories(${TOP_DIR}) include_directories(${TEST_DIR}) +include_directories(${TOP_DIR}/mindspore/ccsrc/backend/kernel_compiler/cpu) include(${CMAKE_CURRENT_SOURCE_DIR}/../../../cmake/external_libs/gtest.cmake) STRING(REPLACE " -fvisibility=hidden " " -fvisibility=default " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") @@ -28,15 +30,15 @@ file(GLOB KERNEL_OP_SRC ${LITE_DIR}/src/runtime/kernel/arm/fp32/*.cc ${LITE_DIR}/src/runtime/kernel/arm/int8/*.cc ${LITE_DIR}/src/runtime/kernel/arm/string/*.cc - ${LITE_DIR}/nnacl/*.c - ${LITE_DIR}/nnacl/fp32/*.c - ${LITE_DIR}/nnacl/int8/*.c - ${LITE_DIR}/nnacl/base/*.c - ${LITE_DIR}/nnacl/infer/*.c + ${NNACL_DIR}/*.c + ${NNACL_DIR}/fp32/*.c + ${NNACL_DIR}/int8/*.c + ${NNACL_DIR}/base/*.c + ${NNACL_DIR}/infer/*.c ) file(GLOB KERNEL_OP_TRAIN_SRC - ${LITE_DIR}/nnacl/fp32_grad/*.c + ${NNACL_DIR}/fp32_grad/*.c ${LITE_DIR}/src/runtime/kernel/arm/fp32_grad/*.cc ) @@ -45,8 +47,8 @@ if(SUPPORT_TRAIN) endif() if(PLATFORM_ARM64) # assembly - file(GLOB TEST_ASSEMBLY_SRC ${LITE_DIR}/nnacl/assembly/arm64/*.s - ${LITE_DIR}/nnacl/assembly/arm64/*.S) + file(GLOB TEST_ASSEMBLY_SRC ${NNACL_DIR}/assembly/arm64/*.s + ${NNACL_DIR}/assembly/arm64/*.S) set_property(SOURCE ${TEST_ASSEMBLY_SRC} PROPERTY LANGUAGE C) set(KERNEL_OP_SRC @@ -58,8 +60,8 @@ endif() if(PLATFORM_ARM32) # assembly file(GLOB TEST_ASSEMBLY_SRC - ${LITE_DIR}/nnacl/assembly/arm32/*.S - ${LITE_DIR}/nnacl/assembly/arm32/*.s) + ${NNACL_DIR}/assembly/arm32/*.S + ${NNACL_DIR}/assembly/arm32/*.s) set_property(SOURCE ${TEST_ASSEMBLY_SRC} PROPERTY LANGUAGE C) set(KERNEL_OP_SRC ${KERNEL_OP_SRC} @@ -68,7 +70,7 @@ if(PLATFORM_ARM32) endif() if("${X86_64_SIMD}" STREQUAL "sse") - file(GLOB TEST_ASSEMBLY_SRC 
${LITE_DIR}/nnacl/intrinsics/sse/*.c) + file(GLOB TEST_ASSEMBLY_SRC ${NNACL_DIR}/intrinsics/sse/*.c) set_property(SOURCE ${TEST_ASSEMBLY_SRC} PROPERTY LANGUAGE C) set(KERNEL_OP_SRC ${KERNEL_OP_SRC} @@ -79,9 +81,9 @@ endif() if("${X86_64_SIMD}" STREQUAL "avx") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1 -mavx -mavx2") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.1 -mavx -mavx2") - file(GLOB TEST_ASSEMBLY_SRC ${LITE_DIR}/nnacl/intrinsics/sse/*.c - ${LITE_DIR}/nnacl/intrinsics/avx/*.c - ${LITE_DIR}/nnacl/assembly/avx/*.S) + file(GLOB TEST_ASSEMBLY_SRC ${NNACL_DIR}/intrinsics/sse/*.c + ${NNACL_DIR}/intrinsics/avx/*.c + ${NNACL_DIR}/assembly/avx/*.S) set_property(SOURCE ${TEST_ASSEMBLY_SRC} PROPERTY LANGUAGE C) set(KERNEL_OP_SRC ${KERNEL_OP_SRC} diff --git a/mindspore/lite/test/ut/nnacl/infer/adam_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/adam_infer_test.cc index 7f00dedb67..55091d5028 100644 --- a/mindspore/lite/test/ut/nnacl/infer/adam_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/adam_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/adam_infer.h" +#include "nnacl/infer/adam_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/addn_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/addn_infer_test.cc index 5b65fdd05d..a969b23d02 100644 --- a/mindspore/lite/test/ut/nnacl/infer/addn_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/addn_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/addn_infer.h" +#include "nnacl/infer/addn_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc index 67c651be96..ea3233958c 100644 --- a/mindspore/lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/apply_momentum_infer.h" +#include "nnacl/infer/apply_momentum_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/argmax_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/argmax_infer_test.cc index 2547f245ab..61714de798 100644 --- a/mindspore/lite/test/ut/nnacl/infer/argmax_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/argmax_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/argmin_max_infer.h" +#include "nnacl/infer/argmin_max_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/argmin_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/argmin_infer_test.cc index 1131a85aff..62686ba1d4 100644 --- a/mindspore/lite/test/ut/nnacl/infer/argmin_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/argmin_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/argmin_max_infer.h" +#include "nnacl/infer/argmin_max_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc index 6100c0bd7e..addd8ab093 100644 --- a/mindspore/lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/arithmetic_compare_infer.h" +#include "nnacl/infer/arithmetic_compare_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/arithmetic_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/arithmetic_infer_test.cc index 2bc6ce81bf..d950c79043 100644 --- a/mindspore/lite/test/ut/nnacl/infer/arithmetic_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/arithmetic_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/arithmetic_infer.h" +#include "nnacl/infer/arithmetic_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/assign_add_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/assign_add_infer_test.cc index 369b24cab7..5e04900cdc 100644 --- a/mindspore/lite/test/ut/nnacl/infer/assign_add_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/assign_add_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/assign_add_infer.h" +#include "nnacl/infer/assign_add_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/assign_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/assign_infer_test.cc index cfbd434a4b..dbb75e39fc 100644 --- a/mindspore/lite/test/ut/nnacl/infer/assign_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/assign_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/assign_infer.h" +#include "nnacl/infer/assign_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc index 58fb0d8f91..c55b0424a7 100644 --- a/mindspore/lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/audio_spectrogram_infer.h" +#include "nnacl/infer/audio_spectrogram_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc index 080b67da0b..6744e59e92 100644 --- a/mindspore/lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/batch_to_space_infer.h" +#include "nnacl/infer/batch_to_space_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/bias_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/bias_grad_infer_test.cc index d21f926c50..c248e12f2e 100644 --- a/mindspore/lite/test/ut/nnacl/infer/bias_grad_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/bias_grad_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/bias_grad_infer.h" +#include "nnacl/infer/bias_grad_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc index dbedc57bbf..0ada9c4670 100644 --- a/mindspore/lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h" +#include "nnacl/infer/binary_cross_entropy_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/bn_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/bn_grad_infer_test.cc index 7c2e529e5e..6ba5c140d8 100644 --- a/mindspore/lite/test/ut/nnacl/infer/bn_grad_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/bn_grad_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/bn_grad_infer.h" +#include "nnacl/infer/bn_grad_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc index c212cc5c66..875e903d10 100644 --- a/mindspore/lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/broadcast_to_infer.h" +#include "nnacl/infer/broadcast_to_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/cast_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/cast_infer_test.cc index fa2266d938..07e260601e 100644 --- a/mindspore/lite/test/ut/nnacl/infer/cast_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/cast_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/cast_infer.h" +#include "nnacl/infer/cast_infer.h" #include "nnacl/cast_parameter.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/concat_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/concat_infer_test.cc index 1f31eab341..c928807a6d 100644 --- a/mindspore/lite/test/ut/nnacl/infer/concat_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/concat_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/concat_infer.h" +#include "nnacl/infer/concat_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc index 356a7e100d..c5472c26ef 100644 --- a/mindspore/lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/constant_of_shape_infer.h" +#include "nnacl/infer/constant_of_shape_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc index 99431a32de..b521e064af 100644 --- a/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h" +#include "nnacl/infer/conv2d_grad_filter_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc index b7f134f9bd..6b9d6da691 100644 --- a/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h" +#include "nnacl/infer/conv2d_grad_input_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/conv2d_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/conv2d_infer_test.cc index f9a018b7f1..8496cec6d9 100644 --- a/mindspore/lite/test/ut/nnacl/infer/conv2d_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/conv2d_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/conv2d_infer.h" +#include "nnacl/infer/conv2d_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc index a5e91a1711..1dec7f9015 100644 --- a/mindspore/lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/crop_and_resize_infer.h" +#include "nnacl/infer/crop_and_resize_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/crop_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/crop_infer_test.cc index 195f5da367..46138a8721 100644 --- a/mindspore/lite/test/ut/nnacl/infer/crop_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/crop_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/crop_infer.h" +#include "nnacl/infer/crop_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc index b5f2c672c0..fb746cb94c 100644 --- a/mindspore/lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/custom_extract_features_infer.h" +#include "nnacl/infer/custom_extract_features_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc index 2d665eb3b8..cb8cbc6a78 100644 --- a/mindspore/lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/custom_normalize_infer.h" +#include "nnacl/infer/custom_normalize_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/custom_predict_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/custom_predict_infer_test.cc index cf039eb524..7c83c537ea 100644 --- a/mindspore/lite/test/ut/nnacl/infer/custom_predict_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/custom_predict_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/custom_predict_infer.h" +#include "nnacl/infer/custom_predict_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/deconv2d_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/deconv2d_infer_test.cc index 8cbd8ac2c1..423297796f 100644 --- a/mindspore/lite/test/ut/nnacl/infer/deconv2d_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/deconv2d_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/deconv2d_infer.h" +#include "nnacl/infer/deconv2d_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/dedepthwise_conv2d_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/dedepthwise_conv2d_infer_test.cc index 684b92b7a0..221df87c63 100644 --- a/mindspore/lite/test/ut/nnacl/infer/dedepthwise_conv2d_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/dedepthwise_conv2d_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h" +#include "nnacl/infer/dedepthwise_conv2d_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc index fd818522a5..ac29f68fff 100644 --- a/mindspore/lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/depth_to_space_infer.h" +#include "nnacl/infer/depth_to_space_infer.h" #include "src/tensor.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc index c78c274617..64fd5c0466 100644 --- a/mindspore/lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h" +#include "nnacl/infer/depthwise_conv2d_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc index c998bf68c5..733af06b60 100644 --- a/mindspore/lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/detection_post_process_infer.h" +#include "nnacl/infer/detection_post_process_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc index 24e61f841d..4244a2ca01 100644 --- a/mindspore/lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/dropout_grad_infer.h" +#include "nnacl/infer/dropout_grad_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc index 58eac2a12d..38bbdf4b28 100644 --- a/mindspore/lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/embedding_lookup_infer.h" +#include "nnacl/infer/embedding_lookup_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/expand_dims_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/expand_dims_infer_test.cc index 7a7d6b06fa..7faf1f1b26 100644 --- a/mindspore/lite/test/ut/nnacl/infer/expand_dims_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/expand_dims_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/expand_dims_infer.h" +#include "nnacl/infer/expand_dims_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/fft_imag_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/fft_imag_infer_test.cc index 32e7b4f262..190783e3ee 100644 --- a/mindspore/lite/test/ut/nnacl/infer/fft_imag_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/fft_imag_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/fft_imag_infer.h" +#include "nnacl/infer/fft_imag_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/fill_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/fill_infer_test.cc index 504ca19047..57d6a8352b 100644 --- a/mindspore/lite/test/ut/nnacl/infer/fill_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/fill_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/fill_infer.h" +#include "nnacl/infer/fill_infer.h" #include "nnacl/fill_parameter.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc index b247ce55dd..c8fe390bea 100644 --- a/mindspore/lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/flatten_grad_infer.h" +#include "nnacl/infer/flatten_grad_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/flatten_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/flatten_infer_test.cc index 7e0f400682..79b1d812e2 100644 --- a/mindspore/lite/test/ut/nnacl/infer/flatten_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/flatten_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/flatten_infer.h" +#include "nnacl/infer/flatten_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/full_connection_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/full_connection_infer_test.cc index bcf65e01c0..965765f1f1 100644 --- a/mindspore/lite/test/ut/nnacl/infer/full_connection_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/full_connection_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/full_connection_infer.h" +#include "nnacl/infer/full_connection_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc index 1e406644c7..b772aabb1e 100644 --- a/mindspore/lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/fused_batchnorm_infer.h" +#include "nnacl/infer/fused_batchnorm_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/gather_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/gather_infer_test.cc index c3ab52095f..96739ca91f 100644 --- a/mindspore/lite/test/ut/nnacl/infer/gather_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/gather_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/gather_infer.h" +#include "nnacl/infer/gather_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/gather_nd_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/gather_nd_infer_test.cc index b7074eeb20..217944e99d 100644 --- a/mindspore/lite/test/ut/nnacl/infer/gather_nd_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/gather_nd_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/gather_nd_infer.h" +#include "nnacl/infer/gather_nd_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc index eddea1905b..12b162f899 100644 --- a/mindspore/lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h" +#include "nnacl/infer/group_conv2d_grad_input_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/gru_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/gru_infer_test.cc index 2a3f065918..df24fe9b54 100644 --- a/mindspore/lite/test/ut/nnacl/infer/gru_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/gru_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/gru_infer.h" +#include "nnacl/infer/gru_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc index d18fca3609..a9a02b6a47 100644 --- a/mindspore/lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/hashtable_lookup_infer.h" +#include "nnacl/infer/hashtable_lookup_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc index 59709d5cf8..b44411fe1c 100644 --- a/mindspore/lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/invert_permutation_infer.h" +#include "nnacl/infer/invert_permutation_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/layer_norm_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/layer_norm_infer_test.cc index d0bad4679b..ac80e1ef80 100644 --- a/mindspore/lite/test/ut/nnacl/infer/layer_norm_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/layer_norm_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/layer_norm_infer.h" +#include "nnacl/infer/layer_norm_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc index 98f7600a78..cbbe962da0 100644 --- a/mindspore/lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/lsh_projection_infer.h" +#include "nnacl/infer/lsh_projection_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/lstm_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/lstm_infer_test.cc index 3ac83ed5fd..7ed9da8b4c 100644 --- a/mindspore/lite/test/ut/nnacl/infer/lstm_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/lstm_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/lstm_infer.h" +#include "nnacl/infer/lstm_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/matmul_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/matmul_infer_test.cc index eb2937096a..ac41e3e6cd 100644 --- a/mindspore/lite/test/ut/nnacl/infer/matmul_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/matmul_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/matmul_infer.h" +#include "nnacl/infer/matmul_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/max_min_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/max_min_grad_infer_test.cc index 14731353a3..51ab932a09 100644 --- a/mindspore/lite/test/ut/nnacl/infer/max_min_grad_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/max_min_grad_infer_test.cc @@ -14,8 +14,8 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/max_min_grad_infer.h" -#include "mindspore/lite/nnacl/arithmetic.h" +#include "nnacl/infer/max_min_grad_infer.h" +#include "nnacl/arithmetic.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/mean_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/mean_infer_test.cc index c0112db8f2..785ed224da 100644 --- a/mindspore/lite/test/ut/nnacl/infer/mean_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/mean_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/mean_infer.h" +#include "nnacl/infer/mean_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/mfcc_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/mfcc_infer_test.cc index dd8d8a89a1..77c2b758a3 100644 --- a/mindspore/lite/test/ut/nnacl/infer/mfcc_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/mfcc_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/mfcc_infer.h" +#include "nnacl/infer/mfcc_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/one_hot_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/one_hot_infer_test.cc index b94aab4fb4..272fac3500 100644 --- a/mindspore/lite/test/ut/nnacl/infer/one_hot_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/one_hot_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/one_hot_infer.h" +#include "nnacl/infer/one_hot_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/pad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/pad_infer_test.cc index 96cfcf0cf0..33750ea023 100644 --- a/mindspore/lite/test/ut/nnacl/infer/pad_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/pad_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/pad_infer.h" +#include "nnacl/infer/pad_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc index 94cd43a6d6..c56fe48034 100644 --- a/mindspore/lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/pooling_grad_infer.h" +#include "nnacl/infer/pooling_grad_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/pooling_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/pooling_infer_test.cc index 6be3296dc6..81292c1985 100644 --- a/mindspore/lite/test/ut/nnacl/infer/pooling_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/pooling_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/pooling_infer.h" +#include "nnacl/infer/pooling_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/power_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/power_infer_test.cc index b924e75e3f..74241f3c45 100644 --- a/mindspore/lite/test/ut/nnacl/infer/power_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/power_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/power_infer.h" +#include "nnacl/infer/power_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc index 484f016f25..895d77c831 100644 --- a/mindspore/lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h" +#include "nnacl/infer/quant_dtype_cast_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc index db8b6965b7..c02c52cf27 100644 --- a/mindspore/lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/random_standard_normal_infer.h" +#include "nnacl/infer/random_standard_normal_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/range_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/range_infer_test.cc index fbcb9d89af..081c9a9a23 100644 --- a/mindspore/lite/test/ut/nnacl/infer/range_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/range_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/range_infer.h" +#include "nnacl/infer/range_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/rank_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/rank_infer_test.cc index 0b93d6355a..b7fea7db2d 100644 --- a/mindspore/lite/test/ut/nnacl/infer/rank_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/rank_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/rank_infer.h" +#include "nnacl/infer/rank_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/reduce_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/reduce_infer_test.cc index ec93e6311e..85ff609124 100644 --- a/mindspore/lite/test/ut/nnacl/infer/reduce_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/reduce_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/reduce_infer.h" +#include "nnacl/infer/reduce_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/reshape_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/reshape_infer_test.cc index 89a41defc9..85384202e2 100644 --- a/mindspore/lite/test/ut/nnacl/infer/reshape_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/reshape_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/reshape_infer.h" +#include "nnacl/infer/reshape_infer.h" #include "nnacl/reshape_parameter.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/resize_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/resize_infer_test.cc index aa5c4943cd..82cac51a12 100644 --- a/mindspore/lite/test/ut/nnacl/infer/resize_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/resize_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/resize_infer.h" +#include "nnacl/infer/resize_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/rfft_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/rfft_infer_test.cc index 177100707c..3a8ed61ae5 100644 --- a/mindspore/lite/test/ut/nnacl/infer/rfft_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/rfft_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/rfft_infer.h" +#include "nnacl/infer/rfft_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc index 459b4906a6..65cc8beba7 100644 --- a/mindspore/lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/roi_pooling_infer.h" +#include "nnacl/infer/roi_pooling_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc index 7baf04ea66..79b207ef7c 100644 --- a/mindspore/lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/scatter_nd_infer.h" +#include "nnacl/infer/scatter_nd_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/select_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/select_infer_test.cc index e711c58a86..d12d7a7ca6 100644 --- a/mindspore/lite/test/ut/nnacl/infer/select_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/select_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/select_infer.h" +#include "nnacl/infer/select_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/sgd_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/sgd_infer_test.cc index 0df329290d..d132391eef 100644 --- a/mindspore/lite/test/ut/nnacl/infer/sgd_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/sgd_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/sgd_infer.h" +#include "nnacl/infer/sgd_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/shape_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/shape_infer_test.cc index ec968dd6c1..25bfbca196 100644 --- a/mindspore/lite/test/ut/nnacl/infer/shape_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/shape_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/shape_infer.h" +#include "nnacl/infer/shape_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/size_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/size_infer_test.cc index 026eef8221..14210fd34a 100644 --- a/mindspore/lite/test/ut/nnacl/infer/size_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/size_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/size_infer.h" +#include "nnacl/infer/size_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/skip_gram_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/skip_gram_infer_test.cc index f36480a359..4f31c1c687 100644 --- a/mindspore/lite/test/ut/nnacl/infer/skip_gram_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/skip_gram_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/skip_gram_infer.h" +#include "nnacl/infer/skip_gram_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/slice_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/slice_infer_test.cc index 38ae57a995..ad0865b4d9 100644 --- a/mindspore/lite/test/ut/nnacl/infer/slice_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/slice_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/slice_infer.h" +#include "nnacl/infer/slice_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc index 952ddfa1c3..0544657895 100644 --- a/mindspore/lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h" +#include "nnacl/infer/softmax_cross_entropy_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/softmax_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/softmax_infer_test.cc index 1f37f9d61b..13146760af 100644 --- a/mindspore/lite/test/ut/nnacl/infer/softmax_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/softmax_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/softmax_infer.h" +#include "nnacl/infer/softmax_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc index a9b470e1a9..524b741426 100644 --- a/mindspore/lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/space_to_batch_infer.h" +#include "nnacl/infer/space_to_batch_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc index 1cb40e910e..c784843db5 100644 --- a/mindspore/lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h" +#include "nnacl/infer/space_to_batch_nd_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc index 7dd2161527..1e7c2d8524 100644 --- a/mindspore/lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/space_to_depth_infer.h" +#include "nnacl/infer/space_to_depth_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc index 9b74fe2dd4..39f0e95051 100644 --- a/mindspore/lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/sparse_to_dense_infer.h" +#include "nnacl/infer/sparse_to_dense_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/split_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/split_infer_test.cc index 4817099fd8..f105a94eea 100644 --- a/mindspore/lite/test/ut/nnacl/infer/split_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/split_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/split_infer.h" +#include "nnacl/infer/split_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/squeeze_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/squeeze_infer_test.cc index 7d6f932c9c..1e42422586 100644 --- a/mindspore/lite/test/ut/nnacl/infer/squeeze_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/squeeze_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/squeeze_infer.h" +#include "nnacl/infer/squeeze_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/stack_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/stack_infer_test.cc index e3c4ca6ab5..0a5fcdb01c 100644 --- a/mindspore/lite/test/ut/nnacl/infer/stack_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/stack_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/stack_infer.h" +#include "nnacl/infer/stack_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/strided_slice_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/strided_slice_infer_test.cc index 6e0afca6a2..a697d412e3 100644 --- a/mindspore/lite/test/ut/nnacl/infer/strided_slice_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/strided_slice_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/strided_slice_infer.h" +#include "nnacl/infer/strided_slice_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc index bc3d159bb4..c50cf8a403 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc @@ -15,7 +15,7 @@ */ #include "common/common_test.h" #include "src/common/tensor_util.h" -#include "mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h" +#include "nnacl/infer/tensorlist_fromtensor_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc index abf83424cb..635405eb1d 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc @@ -15,7 +15,7 @@ */ #include "common/common_test.h" #include "src/common/tensor_util.h" -#include "mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h" +#include "nnacl/infer/tensorlist_getitem_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc index 9398b0b2f7..d9b41df3cf 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h" +#include "nnacl/infer/tensorlist_reserve_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc index 23a958e102..639aa97d06 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h" +#include "nnacl/infer/tensorlist_setitem_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc index 9dd2acb472..adebf6aa99 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/tensorlist_stack_infer.h" +#include "nnacl/infer/tensorlist_stack_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/tile_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tile_infer_test.cc index 8ef8976844..1e8d68334e 100644 --- a/mindspore/lite/test/ut/nnacl/infer/tile_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/tile_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/tile_infer.h" +#include "nnacl/infer/tile_infer.h" #include "nnacl/base/tile_base.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/topk_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/topk_infer_test.cc index 3db894cd97..1a60a25378 100644 --- a/mindspore/lite/test/ut/nnacl/infer/topk_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/topk_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/topk_infer.h" +#include "nnacl/infer/topk_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/transpose_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/transpose_infer_test.cc index 666c0615d1..4d36b98caa 100644 --- a/mindspore/lite/test/ut/nnacl/infer/transpose_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/transpose_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/transpose_infer.h" +#include "nnacl/infer/transpose_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/unique_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/unique_infer_test.cc index 4c3204c121..d2048aa658 100644 --- a/mindspore/lite/test/ut/nnacl/infer/unique_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/unique_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/unique_infer.h" +#include "nnacl/infer/unique_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc index b04cf7c735..526f897599 100644 --- a/mindspore/lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h" +#include "nnacl/infer/unsorted_segment_sum_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc index 643f2efdde..25122ce2dd 100644 --- a/mindspore/lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/unsqueeze_infer.h" +#include "nnacl/infer/unsqueeze_infer.h" #include "nnacl/unsqueeze_parameter.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/unstack_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/unstack_infer_test.cc index 682b4a7849..754651ead6 100644 --- a/mindspore/lite/test/ut/nnacl/infer/unstack_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/unstack_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/unstack_infer.h" +#include "nnacl/infer/unstack_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/where_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/where_infer_test.cc index 7e0fb716d3..35399b6780 100644 --- a/mindspore/lite/test/ut/nnacl/infer/where_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/where_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/where_infer.h" +#include "nnacl/infer/where_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/nnacl/infer/while_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/while_infer_test.cc index 32bd9668e2..f7dbda04dd 100644 --- a/mindspore/lite/test/ut/nnacl/infer/while_infer_test.cc +++ b/mindspore/lite/test/ut/nnacl/infer/while_infer_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/infer/while_infer.h" +#include "nnacl/infer/while_infer.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc index b8d35b68bc..5adecbc255 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc @@ -19,9 +19,9 @@ #include "src/common/log_adapter.h" #include "common/common_test.h" #include "mindspore/lite/src/common/file_utils.h" -#include "mindspore/lite/nnacl/pack.h" +#include "nnacl/pack.h" #ifdef ENABLE_FP16 -#include "mindspore/lite/nnacl/fp16/pack_fp16.h" +#include "nnacl/fp16/pack_fp16.h" #endif namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc index 6786dbe45d..ab3ae8e667 100644 --- 
a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc @@ -16,7 +16,7 @@ #include #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/activation_fp32.h" +#include "nnacl/fp32/activation_fp32.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc index fefb471f93..bb357f5c8f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc @@ -15,9 +15,9 @@ */ #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/base/batch_to_space_base.h" -#include "mindspore/lite/nnacl/batch_to_space.h" -#include "mindspore/lite/nnacl/common_func.h" +#include "nnacl/base/batch_to_space_base.h" +#include "nnacl/batch_to_space.h" +#include "nnacl/common_func.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc index 04b9c467c5..f517086109 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc @@ -16,7 +16,7 @@ #include #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/batchnorm_fp32.h" +#include "nnacl/fp32/batchnorm_fp32.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc 
index f26e6e535f..d966011f76 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/crop_fp32.h" +#include "nnacl/fp32/crop_fp32.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32/crop_fp32.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc index 2f34ae980d..f882642d7f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc @@ -19,8 +19,8 @@ #include "common/common_test.h" #include "src/common/file_utils.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.h" -#include "mindspore/lite/nnacl/fp32/deconv_fp32.h" -#include "mindspore/lite/nnacl/op_base.h" +#include "nnacl/fp32/deconv_fp32.h" +#include "nnacl/op_base.h" namespace mindspore { class TestDeConvolutionFp32 : public mindspore::CommonTest { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc index c1e48155e2..52f3d43de4 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc @@ -15,8 +15,8 @@ */ #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/base/depth_to_space_base.h" -#include "mindspore/lite/nnacl/common_func.h" +#include "nnacl/base/depth_to_space_base.h" +#include "nnacl/common_func.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc 
b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc index 111fda99d0..bef7e28a8b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc @@ -18,7 +18,7 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/lsh_projection_parameter.h" +#include "nnacl/lsh_projection_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc index 9929a697a3..8ea3c086af 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc @@ -17,7 +17,7 @@ #include #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/lstm_fp32.h" +#include "nnacl/fp32/lstm_fp32.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc index 8cc9d9fbda..d0f1282f13 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc @@ -17,7 +17,7 @@ #include "src/common/log_adapter.h" #include "common/common_test.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.h" -#include "mindspore/lite/nnacl/fp32/matmul_fp32.h" +#include "nnacl/fp32/matmul_fp32.h" #include "src/kernel_registry.h" #include "src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc 
b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc index 5858141fdb..0fffde0434 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc @@ -16,7 +16,7 @@ #include #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/reduce_fp32.h" +#include "nnacl/fp32/reduce_fp32.h" #include "schema/inner/model_generated.h" #include "src/tensor.h" #include "mindspore/lite/src/kernel_registry.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc index 4d7fff18a3..6329f04ebb 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc @@ -17,7 +17,7 @@ #include #include #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/reverse_sequence_fp32.h" +#include "nnacl/fp32/reverse_sequence_fp32.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc index 245426f51b..f250ff768a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc @@ -16,7 +16,7 @@ #include #include "src/runtime/kernel/arm/fp32/skip_gram_fp32.h" -#include "mindspore/lite/nnacl/skip_gram_parameter.h" +#include "nnacl/skip_gram_parameter.h" #include "src/common/file_utils.h" #include "common/common_test.h" #include "src/common/log_adapter.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc 
index 942982fbd4..a47d2e7a2a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc @@ -17,7 +17,7 @@ #include #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/space_to_batch_fp32.h" +#include "nnacl/fp32/space_to_batch_fp32.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc index 82c53c6568..557ae648a7 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc @@ -18,8 +18,8 @@ #include #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/space_to_depth_parameter.h" -#include "mindspore/lite/nnacl/base/space_to_depth_base.h" +#include "nnacl/space_to_depth_parameter.h" +#include "nnacl/base/space_to_depth_base.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc index 8a985af705..3edd833196 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc @@ -18,7 +18,7 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/sparse_to_dense_fp32.h" +#include "nnacl/fp32/sparse_to_dense_fp32.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" 
#include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/stack_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/stack_fp32_test.cc index 8e3f48f56a..ba2b7b044a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/stack_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/stack_fp32_test.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "common/common_test.h" -#include "mindspore/lite/nnacl/base/stack_base.h" +#include "nnacl/base/stack_base.h" namespace mindspore { class StackTestFp32 : public mindspore::CommonTest { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc index 6b2345468b..5811c6d575 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc @@ -19,7 +19,7 @@ #include "src/common/log_adapter.h" #include "common/common_test.h" #include "mindspore/lite/src/common/utils.h" -#include "mindspore/lite/nnacl/fp32/strided_slice_fp32.h" +#include "nnacl/fp32/strided_slice_fp32.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc index 3e9e33c783..d059201c23 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc @@ -16,7 +16,7 @@ #include #include "common/common_test.h" -#include "mindspore/lite/nnacl/base/tile_base.h" +#include "nnacl/base/tile_base.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc 
b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc index 705b017a2f..6d5bbe1ef1 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc @@ -17,7 +17,7 @@ #include #include #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/topk_fp32.h" +#include "nnacl/fp32/topk_fp32.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc index 12e965431d..f49ee9f670 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc @@ -19,7 +19,7 @@ #include "src/common/log_adapter.h" #include "common/common_test.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.h" -#include "mindspore/lite/nnacl/transpose.h" +#include "nnacl/transpose.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/uniform_real_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/uniform_real_fp32_test.cc index 9daebf28b4..7bc238d998 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/uniform_real_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/uniform_real_fp32_test.cc @@ -16,7 +16,7 @@ #include #include "common/common_test.h" -#include "mindspore/lite/nnacl/random_parameter.h" +#include "nnacl/random_parameter.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc index cb2582d07d..843c49c460 100644 --- 
a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc @@ -17,7 +17,7 @@ #include #include #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/unique_fp32.h" +#include "nnacl/fp32/unique_fp32.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc index 0eb1ca0099..82809e68c4 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc @@ -17,7 +17,7 @@ #include #include #include "common/common_test.h" -#include "mindspore/lite/nnacl/base/unstack_base.h" +#include "nnacl/base/unstack_base.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc index b49f1368dd..2f4becb4dc 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc @@ -23,7 +23,7 @@ #include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h" -#include "mindspore/lite/nnacl/conv_parameter.h" +#include "nnacl/conv_parameter.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc index 
74367174b7..2e1f5c9e7e 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc @@ -20,7 +20,7 @@ #include "common/common_test.h" #include "src/common/file_utils.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h" -#include "mindspore/lite/nnacl/conv_parameter.h" +#include "nnacl/conv_parameter.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc index 39963e33fe..b8cbf181ff 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc @@ -23,7 +23,7 @@ #include "src/common/utils.h" #include "src/common/file_utils.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h" -#include "mindspore/lite/nnacl/fp32_grad/softmax_grad.h" +#include "nnacl/fp32_grad/softmax_grad.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc index 9cb0edeccc..5895faa7eb 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc @@ -17,7 +17,7 @@ #include #include "schema/inner/model_generated.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/arithmetic_self_parameter.h" +#include "nnacl/arithmetic_self_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" 
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc index f059a3917b..be8a2ebb98 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc @@ -17,8 +17,8 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/batchnorm_parameter.h" -#include "mindspore/lite/nnacl/int8/batchnorm_int8.h" +#include "nnacl/batchnorm_parameter.h" +#include "nnacl/int8/batchnorm_int8.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc index f6a49bcdb2..2d2be7b4dc 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc @@ -18,7 +18,7 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/concat_parameter.h" +#include "nnacl/concat_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc index 182543d34a..5151a149fe 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc @@ -17,7 +17,7 @@ #include "common/common_test.h" #include "mindspore/lite/src/lite_kernel.h" #include "src/common/file_utils.h" -#include 
"mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "nnacl/common_func.h" #include "mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc index 5ac676a46d..70145de386 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc @@ -18,7 +18,7 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/crop_parameter.h" +#include "nnacl/crop_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc index 0bc0a38e2c..5899902eea 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc @@ -20,9 +20,9 @@ #include "common/common_test.h" #include "src/common/file_utils.h" #include "mindspore/lite/src/kernel_registry.h" -#include "mindspore/lite/nnacl/pack.h" -#include "mindspore/lite/nnacl/fp32/matmul_fp32.h" -#include "mindspore/lite/nnacl/int8/deconv_int8.h" +#include "nnacl/pack.h" +#include "nnacl/fp32/matmul_fp32.h" +#include "nnacl/int8/deconv_int8.h" #include "mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h" using mindspore::lite::DeviceType; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc index ef695fb188..67750d4d66 100644 --- 
a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc @@ -17,8 +17,8 @@ #include "src/common/log_adapter.h" #include "common/common_test.h" #include "mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h" -#include "mindspore/lite/nnacl/common_func.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/common_func.h" +#include "nnacl/int8/quantize.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc index fe5c1dc21d..74812ad73c 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc @@ -16,8 +16,8 @@ #include #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/gatherNd_fp32.h" -#include "mindspore/lite/nnacl/int8/gatherNd_int8.h" +#include "nnacl/fp32/gatherNd_fp32.h" +#include "nnacl/int8/gatherNd_int8.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc index 80ab86929e..00790b98f7 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc @@ -16,8 +16,8 @@ #include #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/gather_parameter.h" -#include "mindspore/lite/nnacl/int8/gather_int8.h" +#include "nnacl/gather_parameter.h" +#include "nnacl/int8/gather_int8.h" #include "mindspore/lite/src/kernel_registry.h" #include 
"mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc index 8b95aeed1e..cd38f70528 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc @@ -19,7 +19,7 @@ #include "schema/inner/model_generated.h" #include "common/common_test.h" #include "mindspore/lite/src/runtime/kernel/arm/fp32/activation_fp32.h" -#include "mindspore/lite/nnacl/fp32/activation_fp32.h" +#include "nnacl/fp32/activation_fp32.h" #include "mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/include/context.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc index c5ef9807a3..5479d6f4cd 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc @@ -18,7 +18,7 @@ #include "src/common/log_adapter.h" #include "common/common_test.h" #include "mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "nnacl/common_func.h" #include "nnacl/int8/matmul_int8.h" #include "mindspore/lite/src/kernel_registry.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc index 8496a77419..1451394a15 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc @@ -18,7 +18,7 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include 
"mindspore/lite/nnacl/mul_parameter.h" +#include "nnacl/mul_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc index b90cce8318..73016f37a1 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc @@ -19,7 +19,7 @@ #include "schema/inner/model_generated.h" #include "common/common_test.h" #include "mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h" -#include "mindspore/lite/nnacl/power_parameter.h" +#include "nnacl/power_parameter.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc index c59167be69..6c7918aeba 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc @@ -18,7 +18,7 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc index 5589d44e76..30089ada80 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc @@ -19,7 +19,7 @@ #include "src/common/log_adapter.h" #include 
"common/common_test.h" #include "mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h" -#include "mindspore/lite/nnacl/int8/quant_dtype_cast_int8.h" +#include "nnacl/int8/quant_dtype_cast_int8.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc index 9fc8743ece..b86661ad63 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc @@ -18,7 +18,7 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/reshape_parameter.h" +#include "nnacl/reshape_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc index 574b742774..9bddfae9fd 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc @@ -17,7 +17,7 @@ #include #include "schema/inner/model_generated.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/activation_fp32.h" +#include "nnacl/fp32/activation_fp32.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/include/context.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc index 48f0cb6755..c4eaea3474 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc +++ 
b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc @@ -19,7 +19,7 @@ #include "schema/inner/model_generated.h" #include "common/common_test.h" #include "mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h" -#include "mindspore/lite/nnacl/softmax_parameter.h" +#include "nnacl/softmax_parameter.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc index 542f140e20..0f798956e0 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc @@ -18,7 +18,7 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/split_parameter.h" +#include "nnacl/split_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc index bac156827d..45c6e8e80a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc @@ -18,7 +18,7 @@ #include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/squeeze_parameter.h" +#include "nnacl/squeeze_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc index cb2855535f..32c8829dcf 100644 --- 
a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc @@ -18,7 +18,7 @@ #include #include "schema/inner/model_generated.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/fp32/topk_fp32.h" +#include "nnacl/fp32/topk_fp32.h" #include "mindspore/lite/src/kernel_registry.h" namespace mindspore { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc index 38aec8665f..92704b8f60 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc @@ -17,7 +17,7 @@ #include #include "schema/inner/model_generated.h" #include "common/common_test.h" -#include "mindspore/lite/nnacl/unsqueeze_parameter.h" +#include "nnacl/unsqueeze_parameter.h" #include "mindspore/lite/src/kernel_registry.h" #include "mindspore/lite/src/lite_kernel.h" #include "mindspore/lite/src/tensor.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc index 8f68599842..448507ebb1 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc @@ -18,7 +18,7 @@ #include "src/runtime/kernel/arm/fp32/skip_gram_fp32.h" #include "src/runtime/kernel/arm/string/normalize.h" #include "mindspore/lite/src/kernel_registry.h" -#include "mindspore/lite/nnacl/skip_gram_parameter.h" +#include "nnacl/skip_gram_parameter.h" #include "src/common/file_utils.h" #include "common/common_test.h" #include "src/common/log_adapter.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc index dd25e8b8a1..ffbe9c7010 100644 --- 
a/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc @@ -14,7 +14,7 @@ * limitations under the License. */ #include "ut/src/runtime/kernel/opencl/common.h" -#include "mindspore/lite/nnacl/prelu_parameter.h" +#include "nnacl/prelu_parameter.h" namespace mindspore::lite::opencl::test { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/space_to_depth_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/space_to_depth_tests.cc index ff8b34be66..3b6f31c32e 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/space_to_depth_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/space_to_depth_tests.cc @@ -14,8 +14,8 @@ * limitations under the License. */ #include "ut/src/runtime/kernel/opencl/common.h" -#include "mindspore/lite/nnacl/space_to_depth_parameter.h" -#include "mindspore/lite/nnacl/base/space_to_depth_base.h" +#include "nnacl/space_to_depth_parameter.h" +#include "nnacl/base/space_to_depth_base.h" #include "nnacl/depth_to_space_parameter.h" namespace mindspore::lite::opencl::test { diff --git a/mindspore/lite/tools/converter/CMakeLists.txt b/mindspore/lite/tools/converter/CMakeLists.txt index 91d48d96a6..8d9fb8815f 100644 --- a/mindspore/lite/tools/converter/CMakeLists.txt +++ b/mindspore/lite/tools/converter/CMakeLists.txt @@ -6,9 +6,10 @@ set(CCSRC_SRC ${CCSRC_DIR}/backend/optimizer/common/visit.cc ${CCSRC_DIR}/backend/optimizer/common/optimizer.cc ) +set(NNACL_DIR ${CCSRC_DIR}/backend/kernel_compiler/cpu/nnacl) include(${TOP_DIR}/cmake/external_libs/glog.cmake) - +include_directories(${TOP_DIR}/mindspore/ccsrc/backend/kernel_compiler/cpu) file(GLOB OPS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../../src/ops/*.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../src/ops/populate/*.cc ${CMAKE_CURRENT_SOURCE_DIR}/ops/*.cc @@ -144,20 +145,20 @@ endif() set(ARM_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../src/runtime/kernel/arm) file(GLOB KERNEL_SRC ${ARM_DIR}/base/*.cc - 
${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/*.c - ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/fp32/*.c - ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/infer/*.c - ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/int8/*.c - ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/base/*.c - ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/quantization/*.c + ${NNACL_DIR}/*.c + ${NNACL_DIR}/fp32/*.c + ${NNACL_DIR}/infer/*.c + ${NNACL_DIR}/int8/*.c + ${NNACL_DIR}/base/*.c + ${NNACL_DIR}/quantization/*.c ${ARM_DIR}/fp32/*.cc ${ARM_DIR}/int8/*.cc ) if(PLATFORM_ARM64) # assembly - file(GLOB ASSEMBLY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/assembly/arm64/*.s - ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/assembly/arm64/*.S) + file(GLOB ASSEMBLY_SRC ${NNACL_DIR}/assembly/arm64/*.s + ${NNACL_DIR}/assembly/arm64/*.S) set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C) set(KERNEL_SRC ${KERNEL_SRC} ${ASSEMBLY_SRC}) endif() diff --git a/mindspore/lite/tools/cropper/build_cropper_config.sh b/mindspore/lite/tools/cropper/build_cropper_config.sh index 1a5561a140..e8195a33fc 100644 --- a/mindspore/lite/tools/cropper/build_cropper_config.sh +++ b/mindspore/lite/tools/cropper/build_cropper_config.sh @@ -39,8 +39,7 @@ HEADER_LOCATION="-I${MINDSPORE_HOME} -I${FLATBUFFERS} -I${MINDSPORE_HOME}/mindspore/lite/build/schema -I${MINDSPORE_HOME}/mindspore/lite/build/schema/inner --I${MINDSPORE_HOME}/mindspore/lite/src/../nnacl --I${MINDSPORE_HOME}/mindspore/lite/src/../nnacl/optimize" +-I${MINDSPORE_HOME}/mindspore/ccsrc/backend/kernel_compiler/cpu" REMOVE_LISTS_STR="" getDeep() { @@ -108,9 +107,9 @@ getCommonFile() { others_files_h=( "${MINDSPORE_HOME}"/mindspore/lite/src/populate/populate_register.h "${MINDSPORE_HOME}"/mindspore/lite/src/runtime/infer_manager.h - "${MINDSPORE_HOME}"/mindspore/lite/nnacl/infer/infer_register.h - "${MINDSPORE_HOME}"/mindspore/lite/nnacl/nnacl_utils.h - "${MINDSPORE_HOME}"/mindspore/lite/nnacl/pack.h + "${MINDSPORE_HOME}"/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer_register.h + 
"${MINDSPORE_HOME}"/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_utils.h + "${MINDSPORE_HOME}"/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/pack.h "${MINDSPORE_HOME}"/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.h ) all_files_h=("${include_h[@]}" "${src_files_h[@]}" "${common_files_h[@]}" "${runtime_files_h[@]}" "${others_files_h[@]}") @@ -141,12 +140,12 @@ getCommonFile() { while IFS='' read -r line; do runtime_files_c+=("$line"); done < <(ls ${MINDSPORE_HOME}/mindspore/lite/src/runtime/*.c) # sava all assembly files assembly_files=() - while IFS='' read -r line; do assembly_files+=("$line"); done < <(ls ${MINDSPORE_HOME}/mindspore/lite/nnacl/assembly/*/*.S) + while IFS='' read -r line; do assembly_files+=("$line"); done < <(ls ${MINDSPORE_HOME}/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/assembly/*/*.S) others_files_c=( - "${MINDSPORE_HOME}"/mindspore/lite/nnacl/nnacl_utils.c + "${MINDSPORE_HOME}"/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/nnacl_utils.c "${MINDSPORE_HOME}"/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.cc "${MINDSPORE_HOME}"/mindspore/lite/src/runtime/infer_manager.cc - "${MINDSPORE_HOME}"/mindspore/lite/nnacl/infer/infer_register.c + "${MINDSPORE_HOME}"/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/infer_register.c "${MINDSPORE_HOME}"/mindspore/core/utils/status.cc ) all_files=("${src_files[@]}" "${common_files[@]}" "${runtime_files_cc[@]}" @@ -191,7 +190,7 @@ getCommonFile # get src/ops getOpsFile "Registry\(schema::PrimitiveType_" "${MINDSPORE_HOME}/mindspore/lite/src/ops" "prototype" & getOpsFile "REG_POPULATE\(PrimitiveType_" "${MINDSPORE_HOME}/mindspore/lite/src/ops" "prototype" & -getOpsFile "REG_INFER\(.*?, PrimType_" "${MINDSPORE_HOME}/mindspore/lite/nnacl/infer" "prototype" & +getOpsFile "REG_INFER\(.*?, PrimType_" "${MINDSPORE_HOME}/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer" "prototype" & getOpsFile "REG_KERNEL\(.*?, kNumberTypeFloat32, PrimitiveType_" 
"${MINDSPORE_HOME}/mindspore/lite/src/runtime/kernel/arm" "kNumberTypeFloat32" & getOpsFile "REG_KERNEL\(.*?, kNumberTypeFloat16, PrimitiveType_" "${MINDSPORE_HOME}/mindspore/lite/src/runtime/kernel/arm" "kNumberTypeFloat16" & getOpsFile "REG_KERNEL\(.*?, kNumberTypeInt8, PrimitiveType_" "${MINDSPORE_HOME}/mindspore/lite/src/runtime/kernel/arm" "kNumberTypeInt8" &